diff --git a/core/add_project.sh b/core/add_project.sh
index e5640839..d9c57f27 100755
--- a/core/add_project.sh
+++ b/core/add_project.sh
@@ -48,6 +48,13 @@ function setup_alluxio() {
     mvn clean install -DskipTests -Dcheckstyle.skip -Dlicense.skip -Dfindbugs.skip -Dmaven.javadoc.skip=true
 }
 
+function setup_spark() {
+    [ ! -d "app/ctest-spark" ] && git clone https://github.com/ZHLOLin/spark.git app/ctest-spark
+    cd app/ctest-spark
+    git fetch && git checkout ctest_enable_intercepting
+    mvn clean install -pl core -am -DskipTests
+}
+
 function usage() {
     echo "Usage: add_project.sh <main project>"
     exit 1
@@ -63,8 +70,9 @@ function main() {
             hadoop) setup_hadoop ;;
             hbase) setup_hbase ;;
             zookeeper) setup_zookeeper ;;
+            spark) setup_spark ;;
             alluxio) setup_alluxio ;;
-            *) echo "Unexpected project: $project - only support hadoop, hbase, zookeeper and alluxio." ;;
+            *) echo "Unexpected project: $project - only support hadoop, hbase, zookeeper, spark and alluxio." ;;
         esac
     fi
 }
diff --git a/core/ctest_const.py b/core/ctest_const.py
index d0820f43..270687d0 100644
--- a/core/ctest_const.py
+++ b/core/ctest_const.py
@@ -12,11 +12,13 @@
 HBASE = "hbase-server"
 ZOOKEEPER = "zookeeper-server"
 ALLUXIO = "alluxio-core"
+SPARK = "spark-core"
 
 CTEST_HADOOP_DIR = os.path.join(APP_DIR, "ctest-hadoop")
 CTEST_HBASE_DIR = os.path.join(APP_DIR, "ctest-hbase")
 CTEST_ZK_DIR = os.path.join(APP_DIR, "ctest-zookeeper")
 CTEST_ALLUXIO_DIR = os.path.join(APP_DIR, "ctest-alluxio")
+CTEST_SPARK_DIR = os.path.join(APP_DIR, "ctest-spark")
 
 PROJECT_DIR = {
     HCOMMON: CTEST_HADOOP_DIR,
@@ -24,6 +26,7 @@
     HBASE: CTEST_HBASE_DIR,
     ZOOKEEPER: CTEST_ZK_DIR,
     ALLUXIO: CTEST_ALLUXIO_DIR,
+    SPARK: CTEST_SPARK_DIR
 }
 
@@ -34,6 +37,7 @@
     HBASE: "hbase-server",
     ZOOKEEPER: "zookeeper-server",
     ALLUXIO: "core",
+    SPARK: "core"
 }
 
@@ -58,6 +62,7 @@
         os.path.join(CTEST_ALLUXIO_DIR, MODULE_SUBDIR[ALLUXIO], "server/worker", SUREFIRE_SUBDIR),
         os.path.join(CTEST_ALLUXIO_DIR, MODULE_SUBDIR[ALLUXIO], "server/master", SUREFIRE_SUBDIR),
     ],
+    SPARK: [os.path.join(CTEST_SPARK_DIR, MODULE_SUBDIR[SPARK], SUREFIRE_SUBDIR)]
 }
 
 # default or deprecate conf path
@@ -74,7 +79,8 @@
     HDFS: os.path.join(DEFAULT_CONF_DIR, HDFS + "-default.tsv"),
     HBASE: os.path.join(DEFAULT_CONF_DIR, HBASE + "-default.tsv"),
     ALLUXIO: os.path.join(DEFAULT_CONF_DIR, ALLUXIO + "-default.tsv"),
-    ZOOKEEPER: os.path.join(DEFAULT_CONF_DIR, ZOOKEEPER + "-default.tsv")
+    ZOOKEEPER: os.path.join(DEFAULT_CONF_DIR, ZOOKEEPER + "-default.tsv"),
+    SPARK: os.path.join(DEFAULT_CONF_DIR, SPARK + "-default.tsv")
 }
 
@@ -96,6 +102,9 @@
     ],
     ALLUXIO: [
         os.path.join(CTEST_ALLUXIO_DIR, "core/alluxio-ctest.properties")
+    ],
+    SPARK: [
+        CTEST_SPARK_DIR
     ]
 }
diff --git a/core/default_configs/spark-core-default.tsv b/core/default_configs/spark-core-default.tsv
new file mode 100644
index 00000000..10fb8a8f
--- /dev/null
+++ b/core/default_configs/spark-core-default.tsv
@@ -0,0 +1,365 @@
+spark.app.name (none) The name of your application. This will appear in the UI and in log data.
+spark.driver.cores 1 Number of cores to use for the driver process, only in cluster mode.
+spark.driver.maxResultSize 1g Limit of total size of serialized results of all partitions for each Spark action (e.g. collect) in bytes. Should be at least 1M, or 0 for unlimited. Jobs will be aborted if the total size is above this limit. Having a high limit may cause out-of-memory errors in driver (depends on spark.driver.memory and memory overhead of objects in JVM). Setting a proper limit can protect the driver from out-of-memory errors.
+spark.driver.memory 1g "Amount of memory to use for the driver process, i.e. where SparkContext is initialized, in the same format as JVM memory strings with a size unit suffix (""k"", ""m"", ""g"" or ""t"") (e.g. 512m, 2g).Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-memory command line option or in your default properties file."
+spark.driver.memoryOverhead driverMemory * spark.driver.memoryOverheadFactor, with minimum of 384 Amount of non-heap memory to be allocated per driver process in cluster mode, in MiB unless otherwise specified. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size (typically 6-10%). This option is currently supported on YARN, Mesos and Kubernetes. Note: Non-heap memory includes off-heap memory (when spark.memory.offHeap.enabled=true) and memory used by other driver processes (e.g. python process that goes with a PySpark driver) and memory used by other non-driver processes running in the same container. The maximum memory size of container to running driver is determined by the sum of spark.driver.memoryOverhead and spark.driver.memory. +spark.driver.memoryOverheadFactor 0.10 "Fraction of driver memory to be allocated as additional non-heap memory per driver process in cluster mode. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size. This value defaults to 0.10 except for Kubernetes non-JVM jobs, which defaults to 0.40. This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with ""Memory Overhead Exceeded"" errors. This preempts this error with a higher default. This value is ignored if spark.driver.memoryOverhead is set directly." +spark.driver.resource.{resourceName}.amount 0 Amount of a particular resource type to use on the driver. If this is used, you must also specify the spark.driver.resource.{resourceName}.discoveryScript for the driver to find the resource on startup. +spark.driver.resource.{resourceName}.discoveryScript None A script for the driver to run to discover a particular resource type. This should write to STDOUT a JSON string in the format of the ResourceInformation class. This has a name and an array of addresses. For a client-submitted driver, discovery script must assign different resource addresses to this driver comparing to other drivers on the same host. +spark.driver.resource.{resourceName}.vendor None Vendor of the resources to use for the driver. This option is currently only supported on Kubernetes and is actually both the vendor and domain following the Kubernetes device plugin naming convention. (e.g. For GPUs on Kubernetes this config would be set to nvidia.com or amd.com) +spark.resources.discoveryPlugin org.apache.spark.resource.ResourceDiscoveryScriptPlugin Comma-separated list of class names implementing org.apache.spark.api.resource.ResourceDiscoveryPlugin to load into the application. This is for advanced users to replace the resource discovery class with a custom implementation. Spark will try each class specified until one of them returns the resource information for that resource. It tries the discovery script last if none of the plugins return information for that resource. +spark.executor.memory 1g "Amount of memory to use per executor process, in the same format as JVM memory strings with a size unit suffix (""k"", ""m"", ""g"" or ""t"") (e.g. 512m, 2g)." +spark.executor.pyspark.memory Not set The amount of memory to be allocated to PySpark in each executor, in MiB unless otherwise specified. If set, PySpark memory for an executor will be limited to this amount. 
If not set, Spark will not limit Python's memory use and it is up to the application to avoid exceeding the overhead memory space shared with other non-JVM processes. When PySpark is run in YARN or Kubernetes, this memory is added to executor resource requests.Note: This feature is dependent on Python's `resource` module; therefore, the behaviors and limitations are inherited. For instance, Windows does not support resource limiting and actual resource is not limited on MacOS. +spark.executor.memoryOverhead executorMemory * spark.executor.memoryOverheadFactor, with minimum of 384 Amount of additional memory to be allocated per executor process, in MiB unless otherwise specified. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the executor size (typically 6-10%). This option is currently supported on YARN and Kubernetes.Note: Additional memory includes PySpark executor memory (when spark.executor.pyspark.memory is not configured) and memory used by other non-executor processes running in the same container. The maximum memory size of container to running executor is determined by the sum of spark.executor.memoryOverhead, spark.executor.memory, spark.memory.offHeap.size and spark.executor.pyspark.memory. +spark.executor.memoryOverheadFactor 0.10 "Fraction of executor memory to be allocated as additional non-heap memory per executor process. This is memory that accounts for things like VM overheads, interned strings, other native overheads, etc. This tends to grow with the container size. This value defaults to 0.10 except for Kubernetes non-JVM jobs, which defaults to 0.40. This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with ""Memory Overhead Exceeded"" errors. This preempts this error with a higher default. This value is ignored if spark.executor.memoryOverhead is set directly." +spark.executor.resource.{resourceName}.amount 0 Amount of a particular resource type to use per executor process. If this is used, you must also specify the spark.executor.resource.{resourceName}.discoveryScript for the executor to find the resource on startup. +spark.executor.resource.{resourceName}.discoveryScript None A script for the executor to run to discover a particular resource type. This should write to STDOUT a JSON string in the format of the ResourceInformation class. This has a name and an array of addresses. +spark.executor.resource.{resourceName}.vendor None Vendor of the resources to use for the executors. This option is currently only supported on Kubernetes and is actually both the vendor and domain following the Kubernetes device plugin naming convention. (e.g. For GPUs on Kubernetes this config would be set to nvidia.com or amd.com) +spark.extraListeners (none) A comma-separated list of classes that implement SparkListener; when initializing SparkContext, instances of these classes will be created and registered with Spark's listener bus. If a class has a single-argument constructor that accepts a SparkConf, that constructor will be called; otherwise, a zero-argument constructor will be called. If no valid constructor can be found, the SparkContext creation will fail with an exception. +spark.local.dir /tmp "Directory to use for ""scratch"" space in Spark, including map output files and RDDs that get stored on disk. This should be on a fast, local disk in your system. 
It can also be a comma-separated list of multiple directories on different disks.Note: This will be overridden by SPARK_LOCAL_DIRS (Standalone), MESOS_SANDBOX (Mesos) or LOCAL_DIRS (YARN) environment variables set by the cluster manager." +spark.logConf false Logs the effective SparkConf as INFO when a SparkContext is started. +spark.master (none) The cluster manager to connect to. See the list of allowed master URL's. +spark.submit.deployMode (none) "The deploy mode of Spark driver program, either ""client"" or ""cluster"", Which means to launch driver program locally (""client"") or remotely (""cluster"") on one of the nodes inside the cluster." +spark.log.callerContext (none) Application information that will be written into Yarn RM log/HDFS audit log when running on Yarn/HDFS. Its length depends on the Hadoop configuration hadoop.caller.context.max.size. It should be concise, and typically can have up to 50 characters. +spark.driver.supervise false If true, restarts the driver automatically if it fails with a non-zero exit status. Only has effect in Spark standalone mode or Mesos cluster deploy mode. +spark.driver.log.dfsDir (none) Base directory in which Spark driver logs are synced, if spark.driver.log.persistToDfs.enabled is true. Within this base directory, each application logs the driver logs to an application specific file. Users may want to set this to a unified location like an HDFS directory so driver log files can be persisted for later usage. This directory should allow any Spark user to read/write files and the Spark History Server user to delete files. Additionally, older logs from this directory are cleaned by the Spark History Server if spark.history.fs.driverlog.cleaner.enabled is true and, if they are older than max age configured by setting spark.history.fs.driverlog.cleaner.maxAge. +spark.driver.log.persistToDfs.enabled false If true, spark application running in client mode will write driver logs to a persistent storage, configured in spark.driver.log.dfsDir. If spark.driver.log.dfsDir is not configured, driver logs will not be persisted. Additionally, enable the cleaner by setting spark.history.fs.driverlog.cleaner.enabled to true in Spark History Server. +spark.driver.log.layout %d{yy/MM/dd HH:mm:ss.SSS} %t %p %c{1}: %m%n%ex The layout for the driver logs that are synced to spark.driver.log.dfsDir. If this is not configured, it uses the layout for the first appender defined in log4j2.properties. If that is also not configured, driver logs use the default layout. +spark.driver.log.allowErasureCoding false Whether to allow driver logs to use erasure coding. On HDFS, erasure coded files will not update as quickly as regular replicated files, so they make take longer to reflect changes written by the application. Note that even if this is true, Spark will still not force the file to use erasure coding, it will simply use file system defaults. +spark.driver.extraClassPath (none) Extra classpath entries to prepend to the classpath of the driver.Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-class-path command line option or in your default properties file. +spark.driver.defaultJavaOptions (none) A string of default JVM options to prepend to spark.driver.extraJavaOptions. This is intended to be set by administrators. For instance, GC settings or other logging. 
Note that it is illegal to set maximum heap size (-Xmx) settings with this option. Maximum heap size settings can be set with spark.driver.memory in the cluster mode and through the --driver-memory command line option in the client mode.Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-java-options command line option or in your default properties file. +spark.driver.extraJavaOptions (none) A string of extra JVM options to pass to the driver. This is intended to be set by users. For instance, GC settings or other logging. Note that it is illegal to set maximum heap size (-Xmx) settings with this option. Maximum heap size settings can be set with spark.driver.memory in the cluster mode and through the --driver-memory command line option in the client mode.Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-java-options command line option or in your default properties file. spark.driver.defaultJavaOptions will be prepended to this configuration. +spark.driver.extraLibraryPath (none) Set a special library path to use when launching the driver JVM.Note: In client mode, this config must not be set through the SparkConf directly in your application, because the driver JVM has already started at that point. Instead, please set this through the --driver-library-path command line option or in your default properties file. +spark.driver.userClassPathFirst false (Experimental) Whether to give user-added jars precedence over Spark's own jars when loading classes in the driver. This feature can be used to mitigate conflicts between Spark's dependencies and user dependencies. It is currently an experimental feature. This is used in cluster mode only. +spark.executor.extraClassPath (none) Extra classpath entries to prepend to the classpath of executors. This exists primarily for backwards-compatibility with older versions of Spark. Users typically should not need to set this option. +spark.executor.defaultJavaOptions (none) A string of default JVM options to prepend to spark.executor.extraJavaOptions. This is intended to be set by administrators. For instance, GC settings or other logging. Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this option. Spark properties should be set using a SparkConf object or the spark-defaults.conf file used with the spark-submit script. Maximum heap size settings can be set with spark.executor.memory. The following symbols, if present will be interpolated: will be replaced by application ID and will be replaced by executor ID. For example, to enable verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: -verbose:gc -Xloggc:/tmp/-.gc +spark.executor.extraJavaOptions (none) A string of extra JVM options to pass to executors. This is intended to be set by users. For instance, GC settings or other logging. Note that it is illegal to set Spark properties or maximum heap size (-Xmx) settings with this option. Spark properties should be set using a SparkConf object or the spark-defaults.conf file used with the spark-submit script. Maximum heap size settings can be set with spark.executor.memory. 
The following symbols, if present will be interpolated: will be replaced by application ID and will be replaced by executor ID. For example, to enable verbose gc logging to a file named for the executor ID of the app in /tmp, pass a 'value' of: -verbose:gc -Xloggc:/tmp/-.gc spark.executor.defaultJavaOptions will be prepended to this configuration. +spark.executor.extraLibraryPath (none) Set a special library path to use when launching executor JVM's. +spark.executor.logs.rolling.maxRetainedFiles (none) Sets the number of latest rolling log files that are going to be retained by the system. Older log files will be deleted. Disabled by default. +spark.executor.logs.rolling.enableCompression false Enable executor log compression. If it is enabled, the rolled executor logs will be compressed. Disabled by default. +spark.executor.logs.rolling.maxSize (none) Set the max size of the file in bytes by which the executor logs will be rolled over. Rolling is disabled by default. See spark.executor.logs.rolling.maxRetainedFiles for automatic cleaning of old logs. +spark.executor.logs.rolling.strategy (none) "Set the strategy of rolling of executor logs. By default it is disabled. It can be set to ""time"" (time-based rolling) or ""size"" (size-based rolling). For ""time"", use spark.executor.logs.rolling.time.interval to set the rolling interval. For ""size"", use spark.executor.logs.rolling.maxSize to set the maximum file size for rolling." +spark.executor.logs.rolling.time.interval daily Set the time interval by which the executor logs will be rolled over. Rolling is disabled by default. Valid values are daily, hourly, minutely or any interval in seconds. See spark.executor.logs.rolling.maxRetainedFiles for automatic cleaning of old logs. +spark.executor.userClassPathFirst false (Experimental) Same functionality as spark.driver.userClassPathFirst, but applied to executor instances. +spark.executorEnv.[EnvironmentVariableName] (none) Add the environment variable specified by EnvironmentVariableName to the Executor process. The user can specify multiple of these to set multiple environment variables. +spark.redaction.regex (?i)secret|password|token Regex to decide which Spark configuration properties and environment variables in driver and executor environments contain sensitive information. When this regex matches a property key or value, the value is redacted from the environment UI and various logs like YARN and event logs. +spark.python.profile false Enable profiling in Python worker, the profile result will show up by sc.show_profiles(), or it will be displayed before the driver exits. It also can be dumped into disk by sc.dump_profiles(path). If some of the profile results had been displayed manually, they will not be displayed automatically before driver exiting. By default the pyspark.profiler.BasicProfiler will be used, but this can be overridden by passing a profiler class in as a parameter to the SparkContext constructor. +spark.python.profile.dump (none) The directory which is used to dump the profile result before driver exiting. The results will be dumped as separated file for each RDD. They can be loaded by pstats.Stats(). If this is specified, the profile result will not be displayed automatically. +spark.python.worker.memory 512m "Amount of memory to use per python worker process during aggregation, in the same format as JVM memory strings with a size unit suffix (""k"", ""m"", ""g"" or ""t"") (e.g. 512m, 2g). 
If the memory used during aggregation goes above this amount, it will spill the data into disks." +spark.python.worker.reuse true Reuse Python worker or not. If yes, it will use a fixed number of Python workers, does not need to fork() a Python process for every task. It will be very useful if there is a large broadcast, then the broadcast will not need to be transferred from JVM to Python worker for every task. +spark.files (none) Comma-separated list of files to be placed in the working directory of each executor. Globs are allowed. +spark.submit.pyFiles (none) Comma-separated list of .zip, .egg, or .py files to place on the PYTHONPATH for Python apps. Globs are allowed. +spark.jars (none) Comma-separated list of jars to include on the driver and executor classpaths. Globs are allowed. +spark.jars.packages (none) Comma-separated list of Maven coordinates of jars to include on the driver and executor classpaths. The coordinates should be groupId:artifactId:version. If spark.jars.ivySettings is given artifacts will be resolved according to the configuration in the file, otherwise artifacts will be searched for in the local maven repo, then maven central and finally any additional remote repositories given by the command-line option --repositories. For more details, see Advanced Dependency Management. +spark.jars.excludes (none) Comma-separated list of groupId:artifactId, to exclude while resolving the dependencies provided in spark.jars.packages to avoid dependency conflicts. +spark.jars.ivy (none) Path to specify the Ivy user directory, used for the local Ivy cache and package files from spark.jars.packages. This will override the Ivy property ivy.default.ivy.user.dir which defaults to ~/.ivy2. +spark.jars.ivySettings (none) Path to an Ivy settings file to customize resolution of jars specified using spark.jars.packages instead of the built-in defaults, such as maven central. Additional repositories given by the command-line option --repositories or spark.jars.repositories will also be included. Useful for allowing Spark to resolve artifacts from behind a firewall e.g. via an in-house artifact server like Artifactory. Details on the settings file format can be found at Settings Files. Only paths with file:// scheme are supported. Paths without a scheme are assumed to have a file:// scheme.When running in YARN cluster mode, this file will also be localized to the remote driver for dependency resolution within SparkContext#addJar +spark.jars.repositories (none) Comma-separated list of additional remote repositories to search for the maven coordinates given with --packages or spark.jars.packages. +spark.archives (none) Comma-separated list of archives to be extracted into the working directory of each executor. .jar, .tar.gz, .tgz and .zip are supported. You can specify the directory name to unpack via adding # after the file name to unpack, for example, file.zip#directory. This configuration is experimental. +spark.pyspark.driver.python (none) Python binary executable to use for PySpark in driver. (default is spark.pyspark.python) +spark.pyspark.python (none) Python binary executable to use for PySpark in both driver and executors. +spark.reducer.maxSizeInFlight 48m Maximum size of map outputs to fetch simultaneously from each reduce task, in MiB unless otherwise specified. Since each output requires us to create a buffer to receive it, this represents a fixed memory overhead per reduce task, so keep it small unless you have a large amount of memory. 
+spark.reducer.maxReqsInFlight Int.MaxValue This configuration limits the number of remote requests to fetch blocks at any given point. When the number of hosts in the cluster increase, it might lead to very large number of inbound connections to one or more nodes, causing the workers to fail under load. By allowing it to limit the number of fetch requests, this scenario can be mitigated. +spark.reducer.maxBlocksInFlightPerAddress Int.MaxValue This configuration limits the number of remote blocks being fetched per reduce task from a given host port. When a large number of blocks are being requested from a given address in a single fetch or simultaneously, this could crash the serving executor or Node Manager. This is especially useful to reduce the load on the Node Manager when external shuffle is enabled. You can mitigate this issue by setting it to a lower value. +spark.shuffle.compress true Whether to compress map output files. Generally a good idea. Compression will use spark.io.compression.codec. +spark.shuffle.file.buffer 32k Size of the in-memory buffer for each shuffle file output stream, in KiB unless otherwise specified. These buffers reduce the number of disk seeks and system calls made in creating intermediate shuffle files. +spark.shuffle.io.maxRetries 3 (Netty only) Fetches that fail due to IO-related exceptions are automatically retried if this is set to a non-zero value. This retry logic helps stabilize large shuffles in the face of long GC pauses or transient network connectivity issues. +spark.shuffle.io.numConnectionsPerPeer 1 (Netty only) Connections between hosts are reused in order to reduce connection buildup for large clusters. For clusters with many hard disks and few hosts, this may result in insufficient concurrency to saturate all disks, and so users may consider increasing this value. +spark.shuffle.io.preferDirectBufs true (Netty only) Off-heap buffers are used to reduce garbage collection during shuffle and cache block transfer. For environments where off-heap memory is tightly limited, users may wish to turn this off to force all allocations from Netty to be on-heap. +spark.shuffle.io.retryWait 5s (Netty only) How long to wait between retries of fetches. The maximum delay caused by retrying is 15 seconds by default, calculated as maxRetries * retryWait. +spark.shuffle.io.backLog -1 Length of the accept queue for the shuffle service. For large applications, this value may need to be increased, so that incoming connections are not dropped if the service cannot keep up with a large number of connections arriving in a short period of time. This needs to be configured wherever the shuffle service itself is running, which may be outside of the application (see spark.shuffle.service.enabled option below). If set below 1, will fallback to OS default defined by Netty's io.netty.util.NetUtil#SOMAXCONN. +spark.shuffle.io.connectionTimeout value of spark.network.timeout Timeout for the established connections between shuffle servers and clients to be marked as idled and closed if there are still outstanding fetch requests but no traffic no the channel for at least `connectionTimeout`. +spark.shuffle.service.enabled false Enables the external shuffle service. This service preserves the shuffle files written by executors e.g. so that executors can be safely removed, or so that shuffle fetches can continue in the event of executor failure. The external shuffle service must be set up in order to enable it. 
See dynamic allocation configuration and setup documentation for more information. +spark.shuffle.service.port 7337 Port on which the external shuffle service will run. +spark.shuffle.service.index.cache.size 100m Cache entries limited to the specified memory footprint, in bytes unless otherwise specified. +spark.shuffle.service.removeShuffle false Whether to use the ExternalShuffleService for deleting shuffle blocks for deallocated executors when the shuffle is no longer needed. Without this enabled, shuffle data on executors that are deallocated will remain on disk until the application ends. +spark.shuffle.maxChunksBeingTransferred Long.MAX_VALUE The max number of chunks allowed to be transferred at the same time on shuffle service. Note that new incoming connections will be closed when the max number is hit. The client will retry according to the shuffle retry configs (see spark.shuffle.io.maxRetries and spark.shuffle.io.retryWait), if those limits are reached the task will fail with fetch failure. +spark.shuffle.sort.bypassMergeThreshold 200 (Advanced) In the sort-based shuffle manager, avoid merge-sorting data if there is no map-side aggregation and there are at most this many reduce partitions. +spark.shuffle.spill.compress true Whether to compress data spilled during shuffles. Compression will use spark.io.compression.codec. +spark.shuffle.accurateBlockThreshold 100 * 1024 * 1024 Threshold in bytes above which the size of shuffle blocks in HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM by avoiding underestimating shuffle block size when fetch shuffle blocks. +spark.shuffle.registration.timeout 5000 Timeout in milliseconds for registration to the external shuffle service. +spark.shuffle.registration.maxAttempts 3 When we fail to register to the external shuffle service, we will retry for maxAttempts times. +spark.files.io.connectionTimeout value of spark.network.timeout Timeout for the established connections for fetching files in Spark RPC environments to be marked as idled and closed if there are still outstanding files being downloaded but no traffic no the channel for at least `connectionTimeout`. +spark.shuffle.checksum.enabled true Whether to calculate the checksum of shuffle data. If enabled, Spark will calculate the checksum values for each partition data within the map output file and store the values in a checksum file on the disk. When there's shuffle data corruption detected, Spark will try to diagnose the cause (e.g., network issue, disk issue, etc.) of the corruption by using the checksum file. +spark.shuffle.checksum.algorithm ADLER32 The algorithm is used to calculate the shuffle checksum. Currently, it only supports built-in algorithms of JDK, e.g., ADLER32, CRC32. +spark.shuffle.service.fetch.rdd.enabled false Whether to use the ExternalShuffleService for fetching disk persisted RDD blocks. In case of dynamic allocation if this feature is enabled executors having only disk persisted blocks are considered idle after spark.dynamicAllocation.executorIdleTimeout and will be released accordingly. +spark.driver.extraClassPath (none) Extra classpath entries to prepend to the classpath of the driver. +spark.driver.defaultJavaOptions (none) A string of default JVM options to prepend to spark.driver.extraJavaOptions. This is intended to be set by administrators. For instance, GC settings or other logging.Note that it is illegal to set maximum heap size (-Xmx) settings with this option. 
Maximum heapsize settings can be set with spark.driver.memory in the cluster mode and through the --driver-memory command line option in the client mode. +spark.driver.extraJavaOptions (none) A string of extra JVM options to pass to the driver. This is intended to be set by users. +spark.driver.extraLibraryPath (none) Set a special library path to use when launching the driver JVM. +spark.driver.userClassPathFirst false (Experimental) Whether to give user-added jars precedence over Spark's own jars when loading classes in the driver. This feature can be used to mitigate conflicts between Spark's dependencies and user dependencies. It is currently an experimental feature. +spark.executor.extraClassPath (none) Extra classpath entries to prepend to the classpath of executors. This exists primarily for backwards-compatibility with older versions of Spark. Users typically should not need to set this option. +spark.executor.defaultJavaOptions (none) A string of default JVM options to prepend to spark.executor.extraJavaOptions. This is intended to be set by administrators. +spark.executor.extraJavaOptions (none) A string of extra JVM options to pass to executors. This is intended to be set by users. +spark.executor.extraLibraryPath (none) Set a special library path to use when launching executor JVM's. +spark.executor.logs.rolling.maxRetainedFiles (none) Sets the number of latest rolling log files that are going to be retained by the system. Older log files will be deleted. Disabled by default. +spark.executor.logs.rolling.enableCompression false Enable executor log compression. If it is enabled, the rolled executor logs will be compressed. Disabled by default. +spark.executor.logs.rolling.maxSize (none) Set the max size of the file in bytes by which the executor logs will be rolled over. Rolling is disabled by default. See spark.executor.logs.rolling.maxRetainedFiles for automatic cleaning of old logs. +spark.executor.logs.rolling.strategy (none) "Set the strategy of rolling of executor logs. By default it is disabled. It can be set to ""time"" (time-based rolling) or ""size"" (size-based rolling). For ""time"", use spark.executor.logs.rolling.time.interval to set the rolling interval. For ""size"", use spark.executor.logs.rolling.maxSize to set the maximum file size for rolling." +spark.executor.logs.rolling.time.interval daily Set the time interval by which the executor logs will be rolled over. Rolling is disabled by default. Valid values are daily, hourly, minutely or any interval in seconds. See spark.executor.logs.rolling.maxRetainedFiles for automatic cleaning of old logs. +spark.executor.userClassPathFirst false (Experimental) Same functionality as spark.driver.userClassPathFirst, but applied to executor instances. +spark.executorEnv.[EnvironmentVariableName] (none) Add the environment variable specified by EnvironmentVariableName to the Executor process. The user can specify multiple of these to set multiple environment variables. +spark.redaction.regex (?i)secret|password|token|access[.]key Regex to decide which Spark configuration properties and environment variables in driver and executor environments contain sensitive information. When this regex matches a property key or value, the value is redacted from the environment UI and various logs like YARN and event logs. +spark.redaction.string.regex (none) Regex to decide which parts of strings produced by Spark contain sensitive information. When this regex matches a string part, that string part is replaced by a dummy value. 
This is currently used to redact the output of SQL explain commands. +spark.python.profile false Enable profiling in Python worker, the profile result will show up by sc.show_profiles(), or it will be displayed before the driver exits. It also can be dumped into disk by sc.dump_profiles(path). If some of the profile results had been displayed manually, they will not be displayed automatically before driver exiting. By default the pyspark.profiler.BasicProfiler will be used, but this can be overridden by passing a profiler class in as a parameter to the SparkContext constructor. +spark.python.profile.dump (none) The directory which is used to dump the profile result before driver exiting. The results will be dumped as separated file for each RDD. They can be loaded by pstats.Stats(). If this is specified, the profile result will not be displayed automatically. +spark.python.worker.memory 512m "Amount of memory to use per python worker process during aggregation, in the same format as JVM memory strings with a size unit suffix (""k"", ""m"", ""g"" or ""t"") (e.g. 512m, 2g). If the memory used during aggregation goes above this amount, it will spill the data into disks." +spark.python.worker.reuse true Reuse Python worker or not. If yes, it will use a fixed number of Python workers, does not need to fork() a Python process for every task. It will be very useful if there is a large broadcast, then the broadcast will not need to be transferred from JVM to Python worker for every task. +spark.files (none) Comma-separated list of files to be placed in the working directory of each executor. Globs are allowed. +spark.submit.pyFiles (none) Comma-separated list of .zip, .egg, or .py files to place on the PYTHONPATH for Python apps. Globs are allowed. +spark.jars (none) Comma-separated list of jars to include on the driver and executor classpaths. Globs are allowed. +spark.jars.packages (none) Comma-separated list of Maven coordinates of jars to include on the driver and executor classpaths. The coordinates should be groupId:artifactId:version. If spark.jars.ivySettings is given artifacts will be resolved according to the configuration in the file, otherwise artifacts will be searched for in the local maven repo, then maven central and finally any additional remote repositories given by the command-line option --repositories. For more details, see Advanced Dependency Management. +spark.jars.excludes (none) Comma-separated list of groupId:artifactId, to exclude while resolving the dependencies provided in spark.jars.packages to avoid dependency conflicts. +spark.jars.ivy (none) Path to specify the Ivy user directory, used for the local Ivy cache and package files from spark.jars.packages. This will override the Ivy property ivy.default.ivy.user.dir which defaults to ~/.ivy2. +spark.jars.ivySettings (none) Path to an Ivy settings file to customize resolution of jars specified using spark.jars.packages instead of the built-in defaults, such as maven central. Additional repositories given by the command-line option --repositories or spark.jars.repositories will also be included. Useful for allowing Spark to resolve artifacts from behind a firewall e.g. via an in-house artifact server like Artifactory. Details on the settings file format can be found at Settings Files. Only paths with file:// scheme are supported. Paths without a scheme are assumed to have a file:// scheme. 
+spark.jars.repositories (none) Comma-separated list of additional remote repositories to search for the maven coordinates given with --packages or spark.jars.packages. +spark.archives (none) Comma-separated list of archives to be extracted into the working directory of each executor. .jar, .tar.gz, .tgz and .zip are supported. You can specify the directory name to unpack via adding # after the file name to unpack, for example, file.zip#directory. This configuration is experimental. +spark.pyspark.driver.python (none) Python binary executable to use for PySpark in driver. (default is spark.pyspark.python) +spark.pyspark.python (none) Python binary executable to use for PySpark in both driver and executors. +spark.reducer.maxSizeInFlight 48m Maximum size of map outputs to fetch simultaneously from each reduce task, in MiB unless otherwise specified. Since each output requires us to create a buffer to receive it, this represents a fixed memory overhead per reduce task, so keep it small unless you have a large amount of memory. +spark.reducer.maxReqsInFlight Int.MaxValue This configuration limits the number of remote requests to fetch blocks at any given point. When the number of hosts in the cluster increase, it might lead to very large number of inbound connections to one or more nodes, causing the workers to fail under load. By allowing it to limit the number of fetch requests, this scenario can be mitigated. +spark.reducer.maxBlocksInFlightPerAddress Int.MaxValue This configuration limits the number of remote blocks being fetched per reduce task from a given host port. When a large number of blocks are being requested from a given address in a single fetch or simultaneously, this could crash the serving executor or Node Manager. This is especially useful to reduce the load on the Node Manager when external shuffle is enabled. You can mitigate this issue by setting it to a lower value. +spark.shuffle.compress true Whether to compress map output files. Generally a good idea. Compression will use spark.io.compression.codec. +spark.shuffle.file.buffer 32k Size of the in-memory buffer for each shuffle file output stream, in KiB unless otherwise specified. These buffers reduce the number of disk seeks and system calls made in creating intermediate shuffle files. +spark.shuffle.unsafe.file.output.buffer 32k The file system for this buffer size after each partition is written in unsafe shuffle writer. In KiB unless otherwise specified. +spark.shuffle.spill.diskWriteBufferSize 1024 * 1024 The buffer size, in bytes, to use when writing the sorted records to an on-disk file. +spark.shuffle.io.maxRetries 3 (Netty only) Fetches that fail due to IO-related exceptions are automatically retried if this is set to a non-zero value. This retry logic helps stabilize large shuffles in the face of long GC pauses or transient network connectivity issues. +spark.shuffle.io.numConnectionsPerPeer 1 (Netty only) Connections between hosts are reused in order to reduce connection buildup for large clusters. For clusters with many hard disks and few hosts, this may result in insufficient concurrency to saturate all disks, and so users may consider increasing this value. +spark.shuffle.io.preferDirectBufs true (Netty only) Off-heap buffers are used to reduce garbage collection during shuffle and cache block transfer. For environments where off-heap memory is tightly limited, users may wish to turn this off to force all allocations from Netty to be on-heap. 
+spark.shuffle.io.retryWait 5s (Netty only) How long to wait between retries of fetches. The maximum delay caused by retrying is 15 seconds by default, calculated as maxRetries * retryWait. +spark.shuffle.io.backLog -1 Length of the accept queue for the shuffle service. For large applications, this value may need to be increased, so that incoming connections are not dropped if the service cannot keep up with a large number of connections arriving in a short period of time. This needs to be configured wherever the shuffle service itself is running, which may be outside of the application (see spark.shuffle.service.enabled option below). If set below 1, will fallback to OS default defined by Netty's io.netty.util.NetUtil#SOMAXCONN. +spark.shuffle.io.connectionTimeout value of spark.network.timeout Timeout for the established connections between shuffle servers and clients to be marked as idled and closed if there are still outstanding fetch requests but no traffic no the channel for at least `connectionTimeout`. +spark.shuffle.service.enabled false Enables the external shuffle service. This service preserves the shuffle files written by executors e.g. so that executors can be safely removed, or so that shuffle fetches can continue in the event of executor failure. The external shuffle service must be set up in order to enable it. See dynamic allocation configuration and setup documentation for more information. +spark.shuffle.service.port 7337 Port on which the external shuffle service will run. +spark.shuffle.service.name spark_shuffle The configured name of the Spark shuffle service the client should communicate with. This must match the name used to configure the Shuffle within the YARN NodeManager configuration (yarn.nodemanager.aux-services). Only takes effect when spark.shuffle.service.enabled is set to true. +spark.shuffle.service.index.cache.size 100m Cache entries limited to the specified memory footprint, in bytes unless otherwise specified. +spark.shuffle.service.removeShuffle false Whether to use the ExternalShuffleService for deleting shuffle blocks for deallocated executors when the shuffle is no longer needed. Without this enabled, shuffle data on executors that are deallocated will remain on disk until the application ends. +spark.shuffle.maxChunksBeingTransferred Long.MAX_VALUE The max number of chunks allowed to be transferred at the same time on shuffle service. Note that new incoming connections will be closed when the max number is hit. The client will retry according to the shuffle retry configs (see spark.shuffle.io.maxRetries and spark.shuffle.io.retryWait), if those limits are reached the task will fail with fetch failure. +spark.shuffle.sort.bypassMergeThreshold 200 (Advanced) In the sort-based shuffle manager, avoid merge-sorting data if there is no map-side aggregation and there are at most this many reduce partitions. +spark.shuffle.sort.io.plugin.class org.apache.spark.shuffle.sort.io.LocalDiskShuffleDataIO Name of the class to use for shuffle IO. +spark.shuffle.spill.compress true Whether to compress data spilled during shuffles. Compression will use spark.io.compression.codec. +spark.shuffle.accurateBlockThreshold 100 * 1024 * 1024 Threshold in bytes above which the size of shuffle blocks in HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM by avoiding underestimating shuffle block size when fetch shuffle blocks. +spark.shuffle.registration.timeout 5000 Timeout in milliseconds for registration to the external shuffle service. 
+spark.shuffle.registration.maxAttempts 3 When we fail to register to the external shuffle service, we will retry for maxAttempts times. +spark.shuffle.reduceLocality.enabled true Whether to compute locality preferences for reduce tasks. +spark.shuffle.mapOutput.minSizeForBroadcast 512k The size at which we use Broadcast to send the map output statuses to the executors. +spark.shuffle.detectCorrupt true Whether to detect any corruption in fetched blocks. +spark.shuffle.detectCorrupt.useExtraMemory false If enabled, part of a compressed/encrypted stream will be de-compressed/de-crypted by using extra memory to detect early corruption. Any IOException thrown will cause the task to be retried once and if it fails again with same exception, then FetchFailedException will be thrown to retry previous stage. +spark.shuffle.useOldFetchProtocol false Whether to use the old protocol while doing the shuffle block fetching. It is only enabled while we need the compatibility in the scenario of new Spark version job fetching shuffle blocks from old version external shuffle service. +spark.shuffle.readHostLocalDisk true If enabled (and spark.shuffle.useOldFetchProtocol is disabled, shuffle blocks requested from those block managers which are running on the same host are read from the disk directly instead of being fetched as remote blocks over the network. +spark.files.io.connectionTimeout value of spark.network.timeout Timeout for the established connections for fetching files in Spark RPC environments to be marked as idled and closed if there are still outstanding files being downloaded but no traffic no the channel for at least `connectionTimeout`. +spark.shuffle.checksum.enabled true Whether to calculate the checksum of shuffle data. If enabled, Spark will calculate the checksum values for each partition data within the map output file and store the values in a checksum file on the disk. When there's shuffle data corruption detected, Spark will try to diagnose the cause (e.g., network issue, disk issue, etc.) of the corruption by using the checksum file. +spark.shuffle.checksum.algorithm ADLER32 The algorithm is used to calculate the shuffle checksum. Currently, it only supports built-in algorithms of JDK, e.g., ADLER32, CRC32. +spark.shuffle.service.fetch.rdd.enabled false Whether to use the ExternalShuffleService for fetching disk persisted RDD blocks. In case of dynamic allocation if this feature is enabled executors having only disk persisted blocks are considered idle after spark.dynamicAllocation.executorIdleTimeout and will be released accordingly. +spark.shuffle.service.db.enabled true Whether to use db in ExternalShuffleService. Note that this only affects standalone mode. +spark.shuffle.service.db.backend LEVELDB Specifies a disk-based store used in shuffle service local db. Setting as LEVELDB or ROCKSDB. +spark.eventLog.logBlockUpdates.enabled false Whether to log events for every block update, if spark.eventLog.enabled is true. *Warning*: This will increase the size of the event log considerably. +spark.eventLog.longForm.enabled false If true, use the long form of call sites in the event log. Otherwise use the short form. +spark.eventLog.compress false Whether to compress logged events, if spark.eventLog.enabled is true. +spark.eventLog.compression.codec zstd The codec to compress logged events. By default, Spark provides four codecs: lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify the codec, e.g. 
org.apache.spark.io.LZ4CompressionCodec, org.apache.spark.io.LZFCompressionCodec, org.apache.spark.io.SnappyCompressionCodec, and org.apache.spark.io.ZStdCompressionCodec. +spark.eventLog.erasureCoding.enabled false Whether to allow event logs to use erasure coding, or turn erasure coding off, regardless of filesystem defaults. On HDFS, erasure coded files will not update as quickly as regular replicated files, so the application updates will take longer to appear in the History Server. Note that even if this is true, Spark will still not force the file to use erasure coding, it will simply use filesystem defaults. +spark.eventLog.dir file:///tmp/spark-events Base directory in which Spark events are logged, if spark.eventLog.enabled is true. Within this base directory, Spark creates a sub-directory for each application, and logs the events specific to the application in this directory. Users may want to set this to a unified location like an HDFS directory so history files can be read by the history server. +spark.eventLog.enabled false Whether to log Spark events, useful for reconstructing the Web UI after the application has finished. +spark.eventLog.overwrite false Whether to overwrite any existing files. +spark.eventLog.buffer.kb 100k Buffer size to use when writing to output streams, in KiB unless otherwise specified. +spark.eventLog.rolling.enabled false Whether rolling over event log files is enabled. If set to true, it cuts down each event log file to the configured size. +spark.eventLog.rolling.maxFileSize 128m When spark.eventLog.rolling.enabled=true, specifies the max size of event log file before it's rolled over. +spark.ui.dagGraph.retainedRootRDDs Int.MaxValue How many DAG graph nodes the Spark UI and status APIs remember before garbage collecting. +spark.ui.enabled true Whether to run the web UI for the Spark application. +spark.ui.killEnabled true Allows jobs and stages to be killed from the web UI. +spark.ui.liveUpdate.period 100ms "How often to update live entities. -1 means ""never update"" when replaying applications, meaning only the last write will happen. For live applications, this avoids a few operations that we can live without when rapidly processing incoming task events." +spark.ui.liveUpdate.minFlushPeriod 1s Minimum time elapsed before stale UI data is flushed. This avoids UI staleness when incoming task events are not fired frequently. +spark.ui.port 4040 Port for your application's dashboard, which shows memory and workload data. +spark.ui.retainedJobs 1000 How many jobs the Spark UI and status APIs remember before garbage collecting. This is a target maximum, and fewer elements may be retained in some circumstances. +spark.ui.retainedStages 1000 How many stages the Spark UI and status APIs remember before garbage collecting. This is a target maximum, and fewer elements may be retained in some circumstances. +spark.ui.retainedTasks 100000 How many tasks in one stage the Spark UI and status APIs remember before garbage collecting. This is a target maximum, and fewer elements may be retained in some circumstances. +spark.ui.reverseProxy false Enable running Spark Master as reverse proxy for worker and application UIs. In this mode, Spark master will reverse proxy the worker and application UIs to enable access without requiring direct access to their hosts. Use it with caution, as worker and application UI will not be accessible directly, you will only be able to access them through spark master/proxy public URL. 
This setting affects all the workers and application UIs running in the cluster and must be set on all the workers, drivers and masters. +spark.ui.reverseProxyUrl (none) "If the Spark UI should be served through another front-end reverse proxy, this is the URL for accessing the Spark master UI through that reverse proxy. This is useful when running proxy for authentication e.g. an OAuth proxy. The URL may contain a path prefix, like http://mydomain.com/path/to/spark/, allowing you to serve the UI for multiple Spark clusters and other web applications through the same virtual host and port. Normally, this should be an absolute URL including scheme (http/https), host and port. It is possible to specify a relative URL starting with ""/"" here. In this case, all URLs generated by the Spark UI and Spark REST APIs will be server-relative links -- this will still work, as the entire Spark UI is served through the same host and port.The setting affects link generation in the Spark UI, but the front-end reverse proxy is responsible forstripping a path prefix before forwarding the request,rewriting redirects which point directly to the Spark master,redirecting access from http://mydomain.com/path/to/spark to http://mydomain.com/path/to/spark/ (trailing slash after path prefix); otherwise relative links on the master page do not work correctly.This setting affects all the workers and application UIs running in the cluster and must be set identically on all the workers, drivers and masters. In is only effective when spark.ui.reverseProxy is turned on. This setting is not needed when the Spark master web UI is directly reachable.Note that the value of the setting can't contain the keyword `proxy` or `history` after split by ""/"". Spark UI relies on both keywords for getting REST API endpoints from URIs." +spark.ui.proxyRedirectUri (none) Where to address redirects when Spark is running behind a proxy. This will make Spark modify redirect responses so they point to the proxy server, instead of the Spark UI's own address. This should be only the address of the server, without any prefix paths for the application; the prefix should be set either by the proxy server itself (by adding the X-Forwarded-Context request header), or by setting the proxy base in the Spark app's configuration. +spark.ui.showConsoleProgress false Show the progress bar in the console. The progress bar shows the progress of stages that run for longer than 500ms. If multiple stages run at the same time, multiple progress bars will be displayed on the same line.Note: In shell environment, the default value of spark.ui.showConsoleProgress is true. +spark.ui.custom.executor.log.url (none) Specifies custom spark executor log URL for supporting external log service instead of using cluster managers' application log URLs in Spark UI. Spark will support some path variables via patterns which can vary on cluster manager. Please check the documentation for your cluster manager to see which patterns are supported, if any.Please note that this configuration also replaces original log urls in event log, which will be also effective when accessing the application on history server. The new log urls must be permanent, otherwise you might have dead link for executor log urls.For now, only YARN mode supports this configuration +spark.worker.ui.retainedExecutors 1000 How many finished executors the Spark UI and status APIs remember before garbage collecting. 
+spark.worker.ui.retainedDrivers 1000 How many finished drivers the Spark UI and status APIs remember before garbage collecting. +spark.sql.ui.retainedExecutions 1000 How many finished executions the Spark UI and status APIs remember before garbage collecting. +spark.streaming.ui.retainedBatches 1000 How many finished batches the Spark UI and status APIs remember before garbage collecting. +spark.ui.retainedDeadExecutors 100 How many dead executors the Spark UI and status APIs remember before garbage collecting. +spark.ui.filters None Comma separated list of filter class names to apply to the Spark Web UI. The filter should be a standard javax servlet Filter. Filter parameters can also be specified in the configuration, by setting config entriesof the form spark.<class name of filter>.param.<param name>=<value> For example: spark.ui.filters=com.test.filter1 spark.com.test.filter1.param.name1=foo spark.com.test.filter1.param.name2=bar +spark.ui.requestHeaderSize 8k The maximum allowed size for a HTTP request header, in bytes unless otherwise specified. This setting applies for the Spark History Server too. +spark.ui.timelineEnabled true Whether to display event timeline data on UI pages. +spark.ui.timeline.executors.maximum 250 The maximum number of executors shown in the event timeline. +spark.ui.timeline.jobs.maximum 500 The maximum number of jobs shown in the event timeline. +spark.ui.timeline.stages.maximum 500 The maximum number of stages shown in the event timeline. +spark.ui.timeline.tasks.maximum 1000 The maximum number of tasks shown in the event timeline. +spark.appStatusStore.diskStoreDir None Local directory where to store diagnostic information of SQL executions. This configuration is only for live UI. +spark.broadcast.compress true Whether to compress broadcast variables before sending them. Generally a good idea. Compression will use spark.io.compression.codec. +spark.checkpoint.compress false Whether to compress RDD checkpoints. Generally a good idea. Compression will use spark.io.compression.codec. +spark.io.compression.codec lz4 The codec used to compress internal data such as RDD partitions, event log, broadcast variables and shuffle outputs. By default, Spark provides four codecs: lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify the codec, e.g. org.apache.spark.io.LZ4CompressionCodec, org.apache.spark.io.LZFCompressionCodec, org.apache.spark.io.SnappyCompressionCodec, and org.apache.spark.io.ZStdCompressionCodec. +spark.io.compression.lz4.blockSize 32k Block size used in LZ4 compression, in the case when LZ4 compression codec is used. Lowering this block size will also lower shuffle memory usage when LZ4 is used. Default unit is bytes, unless otherwise specified. This configuration only applies to `spark.io.compression.codec`. +spark.io.compression.snappy.blockSize 32k Block size in Snappy compression, in the case when Snappy compression codec is used. Lowering this block size will also lower shuffle memory usage when Snappy is used. Default unit is bytes, unless otherwise specified. This configuration only applies to `spark.io.compression.codec`. +spark.io.compression.zstd.level 1 Compression level for Zstd compression codec. Increasing the compression level will result in better compression at the expense of more CPU and memory. This configuration only applies to `spark.io.compression.codec`. +spark.io.compression.zstd.bufferSize 32k Buffer size in bytes used in Zstd compression, in the case when Zstd compression codec is used. 
Lowering this size will lower the shuffle memory usage when Zstd is used, but it might increase the compression cost because of excessive JNI call overhead. This configuration only applies to `spark.io.compression.codec`. +spark.kryo.classesToRegister (none) If you use Kryo serialization, give a comma-separated list of custom class names to register with Kryo. See the tuning guide for more details. +spark.kryo.referenceTracking true Whether to track references to the same object when serializing data with Kryo, which is necessary if your object graphs have loops and useful for efficiency if they contain multiple copies of the same object. Can be disabled to improve performance if you know this is not the case. +spark.kryo.registrationRequired false Whether to require registration with Kryo. If set to 'true', Kryo will throw an exception if an unregistered class is serialized. If set to false (the default), Kryo will write unregistered class names along with each object. Writing class names can cause significant performance overhead, so enabling this option can enforce strictly that a user has not omitted classes from registration. +spark.kryo.registrator (none) If you use Kryo serialization, give a comma-separated list of classes that register your custom classes with Kryo. This property is useful if you need to register your classes in a custom way, e.g. to specify a custom field serializer. Otherwise spark.kryo.classesToRegister is simpler. It should be set to classes that extend KryoRegistrator. See the tuning guide for more details. +spark.kryo.unsafe false Whether to use unsafe based Kryo serializer. Can be substantially faster by using Unsafe Based IO. +spark.kryoserializer.buffer.max 64m "Maximum allowable size of Kryo serialization buffer, in MiB unless otherwise specified. This must be larger than any object you attempt to serialize and must be less than 2048m. Increase this if you get a ""buffer limit exceeded"" exception inside Kryo." +spark.kryoserializer.buffer 64k Initial size of Kryo's serialization buffer, in KiB unless otherwise specified. Note that there will be one buffer per core on each worker. This buffer will grow up to spark.kryoserializer.buffer.max if needed. +spark.rdd.compress false Whether to compress serialized RDD partitions (e.g. for StorageLevel.MEMORY_ONLY_SER in Java and Scala or StorageLevel.MEMORY_ONLY in Python). Can save substantial space at the cost of some extra CPU time. Compression will use spark.io.compression.codec. +spark.serializer org.apache.spark.serializer.JavaSerializer Class to use for serializing objects that will be sent over the network or need to be cached in serialized form. The default of Java serialization works with any Serializable Java object but is quite slow, so we recommend using org.apache.spark.serializer.KryoSerializer and configuring Kryo serialization when speed is necessary. Can be any subclass of org.apache.spark.Serializer. +spark.serializer.objectStreamReset 100 When serializing using org.apache.spark.serializer.JavaSerializer, the serializer caches objects to prevent writing redundant data, however that stops garbage collection of those objects. By calling 'reset' you flush that info from the serializer, and allow old objects to be collected. To turn off this periodic reset set it to -1. By default it will reset the serializer every 100 objects. +spark.memory.fraction 0.6 Fraction of (heap space - 300MB) used for execution and storage. The lower this is, the more frequently spills and cached data eviction occur. 
The purpose of this config is to set aside memory for internal metadata, user data structures, and imprecise size estimation in the case of sparse, unusually large records. Leaving this at the default value is recommended. For more detail, including important information about correctly tuning JVM garbage collection when increasing this value, see this description. +spark.memory.storageFraction 0.5 Amount of storage memory immune to eviction, expressed as a fraction of the size of the region set aside by spark.memory.fraction. The higher this is, the less working memory may be available to execution and tasks may spill to disk more often. Leaving this at the default value is recommended. For more detail, see this description. +spark.memory.offHeap.enabled false If true, Spark will attempt to use off-heap memory for certain operations. If off-heap memory use is enabled, then spark.memory.offHeap.size must be positive. +spark.memory.offHeap.size 0 The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified. This setting has no impact on heap memory usage, so if your executors' total memory consumption must fit within some hard limit then be sure to shrink your JVM heap size accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true. +spark.storage.unrollMemoryThreshold 1024 * 1024 Initial memory to request before unrolling any block. +spark.storage.replication.proactive false Enables proactive block replication for RDD blocks. Cached RDD block replicas lost due to executor failures are replenished if there are any existing available replicas. This tries to get the replication level of the block to the initial number. +spark.storage.localDiskByExecutors.cacheSize 1000 The max number of executors for which the local dirs are stored. This size is both applied for the driver and both for the executors side to avoid having an unbounded store. This cache will be used to avoid the network in case of fetching disk persisted RDD blocks or shuffle blocks (when spark.shuffle.readHostLocalDisk is set) from the same host. +spark.cleaner.periodicGC.interval 30min Controls how often to trigger a garbage collection.This context cleaner triggers cleanups only when weak references are garbage collected. In long-running applications with large driver JVMs, where there is little memory pressure on the driver, this may happen very occasionally or not at all. Not cleaning at all may lead to executors running out of disk space after a while. +spark.cleaner.referenceTracking true Enables or disables context cleaning. +spark.cleaner.referenceTracking.blocking true Controls whether the cleaning thread should block on cleanup tasks (other than shuffle, which is controlled by spark.cleaner.referenceTracking.blocking.shuffle Spark property). +spark.cleaner.referenceTracking.blocking.shuffle false Controls whether the cleaning thread should block on shuffle cleanup tasks. +spark.cleaner.referenceTracking.cleanCheckpoints false Controls whether to clean checkpoint files if the reference is out of scope. +spark.broadcast.blockSize 4m Size of each piece of a block for TorrentBroadcastFactory, in KiB unless otherwise specified. Too large a value decreases parallelism during broadcast (makes it slower); however, if it is too small, BlockManager might take a performance hit. +spark.broadcast.checksum true Whether to enable checksum for broadcast. 
If enabled, broadcasts will include a checksum, which can help detect corrupted blocks, at the cost of computing and sending a little more data. It's possible to disable it if the network has other mechanisms to guarantee data won't be corrupted during broadcast. +spark.broadcast.UDFCompressionThreshold 1 * 1024 * 1024 The threshold at which user-defined functions (UDFs) and Python RDD commands are compressed by broadcast in bytes unless otherwise specified. +spark.executor.cores 1 in YARN mode, all the available cores on the worker in standalone and Mesos coarse-grained modes. "The number of cores to use on each executor. In standalone and Mesos coarse-grained modes, for more detail, see this description." +spark.default.parallelism For distributed shuffle operations like reduceByKey and join, the largest number of partitions in a parent RDD. For operations like parallelize with no parent RDDs, it depends on the cluster manager: Local mode: number of cores on the local machine Mesos fine grained mode: 8 Others: total number of cores on all executor nodes or 2, whichever is larger Default number of partitions in RDDs returned by transformations like join, reduceByKey, and parallelize when not set by user. 0.5.0 +spark.executor.heartbeatInterval 10s Interval between each executor's heartbeats to the driver. Heartbeats let the driver know that the executor is still alive and update it with metrics for in-progress tasks. spark.executor.heartbeatInterval should be significantly less than spark.network.timeout +spark.files.fetchTimeout 60s Communication timeout to use when fetching files added through SparkContext.addFile() from the driver. +spark.files.useFetchCache true If set to true (default), file fetching will use a local cache that is shared by executors that belong to the same application, which can improve task launching performance when running many executors on the same host. If set to false, these caching optimizations will be disabled and all executors will fetch their own copies of files. This optimization may be disabled in order to use Spark local directories that reside on NFS filesystems (see SPARK-6313 for more details). +spark.files.overwrite false Whether to overwrite any files which exist at the startup. Users can not overwrite the files added by SparkContext.addFile or SparkContext.addJar before even if this option is set true. +spark.files.ignoreCorruptFiles false Whether to ignore corrupt files. If true, the Spark jobs will continue to run when encountering corrupted or non-existing files and contents that have been read will still be returned. +spark.files.ignoreMissingFiles false Whether to ignore missing files. If true, the Spark jobs will continue to run when encountering missing files and the contents that have been read will still be returned. +spark.files.maxPartitionBytes 134217728 (128 MiB) The maximum number of bytes to pack into a single partition when reading files. +spark.files.openCostInBytes 4194304 (4 MiB) The estimated cost to open a file, measured by the number of bytes could be scanned at the same time. This is used when putting multiple files into a partition. It is better to overestimate, then the partitions with small files will be faster than partitions with bigger files. +spark.hadoop.cloneConf false If set to true, clones a new Hadoop Configuration object for each task. This option should be enabled to work around Configuration thread-safety issues (see SPARK-2546 for more details). 
This is disabled by default in order to avoid unexpected performance regressions for jobs that are not affected by these issues. +spark.hadoop.validateOutputSpecs true If set to true, validates the output specification (e.g. checking if the output directory already exists) used in saveAsHadoopFile and other variants. This can be disabled to silence exceptions due to pre-existing output directories. We recommend that users do not disable this except if trying to achieve compatibility with previous versions of Spark. Simply use Hadoop's FileSystem API to delete output directories by hand. This setting is ignored for jobs generated through Spark Streaming's StreamingContext, since data may need to be rewritten to pre-existing output directories during checkpoint recovery. +spark.storage.memoryMapThreshold 2m Size of a block above which Spark memory maps when reading a block from disk. Default unit is bytes, unless specified otherwise. This prevents Spark from memory mapping very small blocks. In general, memory mapping has high overhead for blocks close to or below the page size of the operating system. +spark.storage.decommission.enabled false Whether to decommission the block manager when decommissioning executor. +spark.storage.decommission.shuffleBlocks.enabled true Whether to transfer shuffle blocks during block manager decommissioning. Requires a migratable shuffle resolver (like sort based shuffle). +spark.storage.decommission.shuffleBlocks.maxThreads 8 Maximum number of threads to use in migrating shuffle files. +spark.storage.decommission.rddBlocks.enabled true Whether to transfer RDD blocks during block manager decommissioning. +spark.storage.decommission.fallbackStorage.path (none) The location for fallback storage during block manager decommissioning. For example, s3a://spark-storage/. In case of empty, fallback storage is disabled. The storage should be managed by TTL because Spark will not clean it up. +spark.storage.decommission.fallbackStorage.cleanUp false If true, Spark cleans up its fallback storage data during shutting down. +spark.storage.decommission.shuffleBlocks.maxDiskSize (none) Maximum disk space to use to store shuffle blocks before rejecting remote shuffle blocks. Rejecting remote shuffle blocks means that an executor will not receive any shuffle migrations, and if there are no other executors available for migration then shuffle blocks will be lost unless spark.storage.decommission.fallbackStorage.path is configured. +spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version 1 The file output committer algorithm version, valid algorithm version number: 1 or 2. Note that 2 may cause a correctness issue like MAPREDUCE-7282. +spark.eventLog.logStageExecutorMetrics false Whether to write per-stage peaks of executor metrics (for each executor) to the event log. Note: The metrics are polled (collected) and sent in the executor heartbeat, and this is always done; this configuration is only to determine if aggregated metric peaks are written to the event log. +spark.executor.processTreeMetrics.enabled false Whether to collect process tree metrics (from the /proc filesystem) when collecting executor metrics. Note: The process tree metrics are collected only if the /proc filesystem exists. +spark.executor.metrics.pollingInterval 0 How often to collect executor metrics (in milliseconds). If 0, the polling is done on executor heartbeats (thus at the heartbeat interval, specified by spark.executor.heartbeatInterval). If positive, the polling is done at this interval. 
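Illustrative note (not part of the patch): many defaults in this tsv are JVM-style size strings ("32k", "2m", "200m"). Below is a minimal Python sketch of how such suffixes map to bytes, e.g. when post-processing spark-core-default.tsv; it is a simplification, not Spark's own parser.

    # Simplified size-string reader for defaults such as "32k" or "2m".
    # Assumes binary (1024-based) units; plain integers are taken as bytes.
    _UNITS = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}

    def size_to_bytes(value: str) -> int:
        value = value.strip().lower()
        if value[-1].isdigit():
            return int(value)
        return int(float(value[:-1]) * _UNITS[value[-1]])

    assert size_to_bytes("32k") == 32 * 1024
    assert size_to_bytes("2m") == 2 * 1024 * 1024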
+spark.eventLog.gcMetrics.youngGenerationGarbageCollectors Copy,PS Scavenge,ParNew,G1 Young Generation Names of supported young generation garbage collector. A name usually is the return of GarbageCollectorMXBean.getName. The built-in young generation garbage collectors are Copy,PS Scavenge,ParNew,G1 Young Generation. +spark.eventLog.gcMetrics.oldGenerationGarbageCollectors MarkSweepCompact,PS MarkSweep,ConcurrentMarkSweep,G1 Old Generation Names of supported old generation garbage collector. A name usually is the return of GarbageCollectorMXBean.getName. The built-in old generation garbage collectors are MarkSweepCompact,PS MarkSweep,ConcurrentMarkSweep,G1 Old Generation. +spark.executor.metrics.fileSystemSchemes file,hdfs The file system schemes to report in executor metrics. +spark.rpc.message.maxSize 128 "Maximum message size (in MiB) to allow in ""control plane"" communication; generally only applies to map output size information sent between executors and the driver. Increase this if you are running jobs with many thousands of map and reduce tasks and see messages about the RPC message size." +spark.blockManager.port (random) Port for all block managers to listen on. These exist on both the driver and the executors. +spark.driver.blockManager.port (value of spark.blockManager.port) Driver-specific port for the block manager to listen on, for cases where it cannot use the same configuration as executors. +spark.driver.bindAddress (value of spark.driver.host) Hostname or IP address where to bind listening sockets. This config overrides the SPARK_LOCAL_IP environment variable (see below). It also allows a different address from the local one to be advertised to executors or external systems. This is useful, for example, when running containers with bridged networking. For this to properly work, the different ports used by the driver (RPC, block manager and UI) need to be forwarded from the container's host. +spark.driver.host (local hostname) Hostname or IP address for the driver. This is used for communicating with the executors and the standalone Master. +spark.driver.port (random) Port for the driver to listen on. This is used for communicating with the executors and the standalone Master. +spark.rpc.io.backLog 64 Length of the accept queue for the RPC server. For large applications, this value may need to be increased, so that incoming connections are not dropped when a large number of connections arrives in a short period of time. +spark.network.timeout 120s Default timeout for all network interactions. This config will be used in place of spark.storage.blockManagerHeartbeatTimeoutMs, spark.shuffle.io.connectionTimeout, spark.rpc.askTimeout or spark.rpc.lookupTimeout if they are not configured. +spark.network.io.preferDirectBufs true If enabled then off-heap buffer allocations are preferred by the shared allocators. Off-heap buffers are used to reduce garbage collection during shuffle and cache block transfer. For environments where off-heap memory is tightly limited, users may wish to turn this off to force all allocations to be on-heap. +spark.port.maxRetries 16 Maximum number of retries when binding to a port before giving up. When a port is given a specific value (non 0), each subsequent retry will increment the port used in the previous attempt by 1 before retrying. This essentially allows it to try a range of ports from the start port specified to port + maxRetries. +spark.rpc.askTimeout spark.network.timeout Duration for an RPC ask operation to wait before timing out. 
+spark.rpc.lookupTimeout 120s Duration for an RPC remote endpoint lookup operation to wait before timing out. +spark.network.maxRemoteBlockSizeFetchToMem 200m Remote block will be fetched to disk when size of the block is above this threshold in bytes. This is to avoid a giant request takes too much memory. Note this configuration will affect both shuffle fetch and block manager remote block fetch. For users who enabled external shuffle service, this feature can only work when external shuffle service is at least 2.3.0. +spark.rpc.io.connectionTimeout value of spark.network.timeout Timeout for the established connections between RPC peers to be marked as idled and closed if there are outstanding RPC requests but no traffic on the channel for at least `connectionTimeout`. +spark.cores.max (not set) "When running on a standalone deploy cluster or a Mesos cluster in ""coarse-grained"" sharing mode, the maximum amount of CPU cores to request for the application from across the cluster (not from each machine). If not set, the default will be spark.deploy.defaultCores on Spark's standalone cluster manager, or infinite (all available cores) on Mesos." +spark.locality.wait 3s How long to wait to launch a data-local task before giving up and launching it on a less-local node. The same wait will be used to step through multiple locality levels (process-local, node-local, rack-local and then any). It is also possible to customize the waiting time for each level by setting spark.locality.wait.node, etc. You should increase this setting if your tasks are long and see poor locality, but the default usually works well. +spark.locality.wait.node spark.locality.wait Customize the locality wait for node locality. For example, you can set this to 0 to skip node locality and search immediately for rack locality (if your cluster has rack information). +spark.locality.wait.process spark.locality.wait Customize the locality wait for process locality. This affects tasks that attempt to access cached data in a particular executor process. +spark.locality.wait.rack spark.locality.wait Customize the locality wait for rack locality. +spark.scheduler.maxRegisteredResourcesWaitingTime 30s Maximum amount of time to wait for resources to register before scheduling begins. +spark.scheduler.minRegisteredResourcesRatio 0.8 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode The minimum ratio of registered resources (registered resources / total expected resources) (resources are executors in yarn mode and Kubernetes mode, CPU cores in standalone mode and Mesos coarse-grained mode ['spark.cores.max' value is total expected resources for Mesos coarse-grained mode] ) to wait for before scheduling begins. Specified as a double between 0.0 and 1.0. Regardless of whether the minimum ratio of resources has been reached, the maximum amount of time it will wait before scheduling begins is controlled by config spark.scheduler.maxRegisteredResourcesWaitingTime. +spark.scheduler.mode FIFO The scheduling mode between jobs submitted to the same SparkContext. Can be set to FAIR to use fair sharing instead of queueing jobs one after another. Useful for multi-user services. +spark.scheduler.revive.interval 1s The interval length for the scheduler to revive the worker resource offers to run tasks. +spark.scheduler.listenerbus.eventqueue.capacity 10000 The default capacity for event queues. 
Spark will try to initialize an event queue using the capacity specified by `spark.scheduler.listenerbus.eventqueue.queueName.capacity` first. If it's not configured, Spark will use the default capacity specified by this config. Note that capacity must be greater than 0. Consider increasing this value (e.g. to 20000) if listener events are dropped. Increasing this value may result in the driver using more memory. +spark.scheduler.listenerbus.eventqueue.shared.capacity spark.scheduler.listenerbus.eventqueue.capacity Capacity for the shared event queue in the Spark listener bus, which holds events for external listener(s) that register to the listener bus. Consider increasing this value if the listener events corresponding to the shared queue are dropped. Increasing this value may result in the driver using more memory. +spark.scheduler.listenerbus.eventqueue.appStatus.capacity spark.scheduler.listenerbus.eventqueue.capacity Capacity for the appStatus event queue, which holds events for internal application status listeners. Consider increasing this value if the listener events corresponding to the appStatus queue are dropped. Increasing this value may result in the driver using more memory. +spark.scheduler.listenerbus.eventqueue.executorManagement.capacity spark.scheduler.listenerbus.eventqueue.capacity Capacity for the executorManagement event queue in the Spark listener bus, which holds events for internal executor management listeners. Consider increasing this value if the listener events corresponding to the executorManagement queue are dropped. Increasing this value may result in the driver using more memory. +spark.scheduler.listenerbus.eventqueue.eventLog.capacity spark.scheduler.listenerbus.eventqueue.capacity Capacity for the eventLog queue in the Spark listener bus, which holds events for event logging listeners that write events to event logs. Consider increasing this value if the listener events corresponding to the eventLog queue are dropped. Increasing this value may result in the driver using more memory. +spark.scheduler.listenerbus.eventqueue.streams.capacity spark.scheduler.listenerbus.eventqueue.capacity Capacity for the streams queue in the Spark listener bus, which holds events for the internal streaming listener. Consider increasing this value if the listener events corresponding to the streams queue are dropped. Increasing this value may result in the driver using more memory. +spark.scheduler.resource.profileMergeConflicts false "If set to ""true"", Spark will merge ResourceProfiles when different profiles are specified in RDDs that get combined into a single stage. When they are merged, Spark chooses the maximum of each resource and creates a new ResourceProfile. The default of false results in Spark throwing an exception if multiple different ResourceProfiles are found in RDDs going into the same stage." +spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout 120s The timeout in seconds to wait to acquire a new executor and schedule a task before aborting a TaskSet which is unschedulable because all executors are excluded due to task failures. +spark.standalone.submit.waitAppCompletion false In standalone cluster mode, controls whether the client waits to exit until the application completes. If set to true, the client process will stay alive polling the driver's status. Otherwise, the client process will exit after submission.
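Illustrative note (not part of the patch): the per-queue capacities above all fall back to spark.scheduler.listenerbus.eventqueue.capacity. Below is a dict-based Python sketch of that lookup order; names and values are illustrative, not Spark's implementation.

    DEFAULT_KEY = "spark.scheduler.listenerbus.eventqueue.capacity"

    def queue_capacity(conf: dict, queue_name: str) -> int:
        # Queue-specific key wins; otherwise the shared default (10000 if unset).
        specific = "spark.scheduler.listenerbus.eventqueue.%s.capacity" % queue_name
        return int(conf.get(specific, conf.get(DEFAULT_KEY, 10000)))

    conf = {DEFAULT_KEY: "20000",
            "spark.scheduler.listenerbus.eventqueue.eventLog.capacity": "40000"}
    print(queue_capacity(conf, "appStatus"))  # 20000 (shared default)
    print(queue_capacity(conf, "eventLog"))   # 40000 (queue-specific value)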
+spark.excludeOnFailure.enabled false "If set to ""true"", prevent Spark from scheduling tasks on executors that have been excluded due to too many task failures. The algorithm used to exclude executors and nodes can be further controlled by the other ""spark.excludeOnFailure"" configuration options." +spark.excludeOnFailure.timeout 1h (Experimental) How long a node or executor is excluded for the entire application, before it is unconditionally removed from the excludelist to attempt running new tasks. +spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor 1 (Experimental) For a given task, how many times it can be retried on one executor before the executor is excluded for that task. +spark.excludeOnFailure.task.maxTaskAttemptsPerNode 2 (Experimental) For a given task, how many times it can be retried on one node, before the entire node is excluded for that task. +spark.excludeOnFailure.stage.maxFailedTasksPerExecutor 2 (Experimental) How many different tasks must fail on one executor, within one stage, before the executor is excluded for that stage. +spark.excludeOnFailure.stage.maxFailedExecutorsPerNode 2 (Experimental) How many different executors are marked as excluded for a given stage, before the entire node is marked as failed for the stage. +spark.excludeOnFailure.application.maxFailedTasksPerExecutor 2 (Experimental) How many different tasks must fail on one executor, in successful task sets, before the executor is excluded for the entire application. Excluded executors will be automatically added back to the pool of available resources after the timeout specified by spark.excludeOnFailure.timeout. Note that with dynamic allocation, though, the executors may get marked as idle and be reclaimed by the cluster manager. +spark.excludeOnFailure.application.maxFailedExecutorsPerNode 2 (Experimental) How many different executors must be excluded for the entire application, before the node is excluded for the entire application. Excluded nodes will be automatically added back to the pool of available resources after the timeout specified by spark.excludeOnFailure.timeout. Note that with dynamic allocation, though, the executors on the node may get marked as idle and be reclaimed by the cluster manager. +spark.excludeOnFailure.killExcludedExecutors false "(Experimental) If set to ""true"", allow Spark to automatically kill the executors when they are excluded on fetch failure or excluded for the entire application, as controlled by spark.killExcludedExecutors.application.*. Note that, when an entire node is added excluded, all of the executors on that node will be killed." +spark.excludeOnFailure.application.fetchFailure.enabled false "(Experimental) If set to ""true"", Spark will exclude the executor immediately when a fetch failure happens. If external shuffle service is enabled, then the whole node will be excluded." +spark.speculation false "If set to ""true"", performs speculative execution of tasks. This means if one or more tasks are running slowly in a stage, they will be re-launched." +spark.speculation.interval 100ms How often Spark will check for tasks to speculate. +spark.speculation.multiplier 1.5 How many times slower a task is than the median to be considered for speculation. +spark.speculation.quantile 0.75 Fraction of tasks which must be complete before speculation is enabled for a particular stage. +spark.speculation.minTaskRuntime 100ms Minimum amount of time a task runs before being considered for speculation. 
This can be used to avoid launching speculative copies of tasks that are very short. +spark.speculation.task.duration.threshold None Task duration after which the scheduler would try to speculatively run the task. If provided, tasks would be speculatively run if the current stage contains fewer tasks than or equal to the number of slots on a single executor and the task is taking longer than the threshold. This config helps speculate stages with very few tasks. Regular speculation configs may also apply if the executor slots are large enough. E.g. tasks might be re-launched if there are enough successful runs even though the threshold hasn't been reached. The number of slots is computed based on the conf values of spark.executor.cores and spark.task.cpus, minimum 1. Default unit is bytes, unless otherwise specified. +spark.speculation.efficiency.processRateMultiplier 0.75 A multiplier that is used when evaluating inefficient tasks. The higher the multiplier is, the more tasks will possibly be considered as inefficient. +spark.speculation.efficiency.longRunTaskFactor 2 A task will be speculated anyway as long as its duration has exceeded the value of multiplying the factor and the time threshold (either spark.speculation.multiplier * successfulTaskDurations.median or spark.speculation.minTaskRuntime), regardless of whether its data process rate is good or not. This avoids missing inefficient tasks when the task slowness isn't related to the data process rate. +spark.speculation.efficiency.enabled true When set to true, Spark will evaluate the efficiency of task processing through the stage task metrics or its duration, and only speculate the inefficient tasks. A task is inefficient when 1) its data process rate is less than the average data process rate of all successful tasks in the stage multiplied by a multiplier, or 2) its duration has exceeded the value of multiplying spark.speculation.efficiency.longRunTaskFactor and the time threshold (either spark.speculation.multiplier * successfulTaskDurations.median or spark.speculation.minTaskRuntime). +spark.task.cpus 1 Number of cores to allocate for each task. +spark.task.resource.{resourceName}.amount 1 Amount of a particular resource type to allocate for each task, note that this can be a double. If this is specified you must also provide the executor config spark.executor.resource.{resourceName}.amount and any corresponding discovery configs so that your executors are created with that resource type. In addition to whole amounts, a fractional amount (for example, 0.25, which means 1/4th of a resource) may be specified. Fractional amounts must be less than or equal to 0.5, or in other words, the minimum amount of resource sharing is 2 tasks per resource. Additionally, fractional amounts are floored in order to assign resource slots (e.g. a 0.2222 configuration, or 1/0.2222 slots will become 4 tasks/resource, not 5). +spark.task.maxFailures 4 Number of continuous failures of any particular task before giving up on the job. The total number of failures spread across different tasks will not cause the job to fail; a particular task has to fail this number of attempts continuously. If any attempt succeeds, the failure count for the task will be reset. Should be greater than or equal to 1. Number of allowed retries = this value - 1. +spark.task.reaper.enabled false Enables monitoring of killed / interrupted tasks. When set to true, any task which is killed will be monitored by the executor until that task actually finishes executing.
See the other spark.task.reaper.* configurations for details on how to control the exact behavior of this monitoring. When set to false (the default), task killing will use an older code path which lacks such monitoring. +spark.task.reaper.pollingInterval 10s When spark.task.reaper.enabled = true, this setting controls the frequency at which executors will poll the status of killed tasks. If a killed task is still running when polled then a warning will be logged and, by default, a thread-dump of the task will be logged (this thread dump can be disabled via the spark.task.reaper.threadDump setting, which is documented below). +spark.task.reaper.threadDump true When spark.task.reaper.enabled = true, this setting controls whether task thread dumps are logged during periodic polling of killed tasks. Set this to false to disable collection of thread dumps. +spark.task.reaper.killTimeout -1 When spark.task.reaper.enabled = true, this setting specifies a timeout after which the executor JVM will kill itself if a killed task has not stopped running. The default value, -1, disables this mechanism and prevents the executor from self-destructing. The purpose of this setting is to act as a safety-net to prevent runaway noncancellable tasks from rendering an executor unusable. +spark.stage.maxConsecutiveAttempts 4 Number of consecutive stage attempts allowed before a stage is aborted. +spark.stage.ignoreDecommissionFetchFailure false Whether ignore stage fetch failure caused by executor decommission when count spark.stage.maxConsecutiveAttempts +spark.barrier.sync.timeout 365d The timeout in seconds for each barrier() call from a barrier task. If the coordinator didn't receive all the sync messages from barrier tasks within the configured time, throw a SparkException to fail all the tasks. The default value is set to 31536000(3600 * 24 * 365) so the barrier() call shall wait for one year. +spark.scheduler.barrier.maxConcurrentTasksCheck.interval 15s Time in seconds to wait between a max concurrent tasks check failure and the next check. A max concurrent tasks check ensures the cluster can launch more concurrent tasks than required by a barrier stage on job submitted. The check can fail in case a cluster has just started and not enough executors have registered, so we wait for a little while and try to perform the check again. If the check fails more than a configured max failure times for a job then fail current job submission. Note this config only applies to jobs that contain one or more barrier stages, we won't perform the check on non-barrier jobs. +spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures 40 Number of max concurrent tasks check failures allowed before fail a job submission. A max concurrent tasks check ensures the cluster can launch more concurrent tasks than required by a barrier stage on job submitted. The check can fail in case a cluster has just started and not enough executors have registered, so we wait for a little while and try to perform the check again. If the check fails more than a configured max failure times for a job then fail current job submission. Note this config only applies to jobs that contain one or more barrier stages, we won't perform the check on non-barrier jobs. +spark.dynamicAllocation.enabled false Whether to use dynamic resource allocation, which scales the number of executors registered with this application up and down based on the workload. For more detail, see the description here. 
This requires spark.shuffle.service.enabled or spark.dynamicAllocation.shuffleTracking.enabled to be set. The following configurations are also relevant: spark.dynamicAllocation.minExecutors, spark.dynamicAllocation.maxExecutors, and spark.dynamicAllocation.initialExecutors spark.dynamicAllocation.executorAllocationRatio +spark.dynamicAllocation.executorIdleTimeout 60s If dynamic allocation is enabled and an executor has been idle for more than this duration, the executor will be removed. For more detail, see this description. +spark.dynamicAllocation.cachedExecutorIdleTimeout infinity If dynamic allocation is enabled and an executor which has cached data blocks has been idle for more than this duration, the executor will be removed. For more details, see this description. +spark.dynamicAllocation.initialExecutors spark.dynamicAllocation.minExecutors Initial number of executors to run if dynamic allocation is enabled. If `--num-executors` (or `spark.executor.instances`) is set and larger than this value, it will be used as the initial number of executors. +spark.dynamicAllocation.maxExecutors infinity Upper bound for the number of executors if dynamic allocation is enabled. +spark.dynamicAllocation.minExecutors 0 Lower bound for the number of executors if dynamic allocation is enabled. +spark.dynamicAllocation.executorAllocationRatio 1 By default, the dynamic allocation will request enough executors to maximize the parallelism according to the number of tasks to process. While this minimizes the latency of the job, with small tasks this setting can waste a lot of resources due to executor allocation overhead, as some executor might not even do any work. This setting allows to set a ratio that will be used to reduce the number of executors w.r.t. full parallelism. Defaults to 1.0 to give maximum parallelism. 0.5 will divide the target number of executors by 2 The target number of executors computed by the dynamicAllocation can still be overridden by the spark.dynamicAllocation.minExecutors and spark.dynamicAllocation.maxExecutors settings +spark.dynamicAllocation.schedulerBacklogTimeout 1s If dynamic allocation is enabled and there have been pending tasks backlogged for more than this duration, new executors will be requested. For more detail, see this description. +spark.dynamicAllocation.sustainedSchedulerBacklogTimeout schedulerBacklogTimeout Same as spark.dynamicAllocation.schedulerBacklogTimeout, but used only for subsequent executor requests. For more detail, see this description. +spark.dynamicAllocation.shuffleTracking.enabled true Enables shuffle file tracking for executors, which allows dynamic allocation without the need for an external shuffle service. This option will try to keep alive executors that are storing shuffle data for active jobs. +spark.dynamicAllocation.shuffleTracking.timeout infinity When shuffle tracking is enabled, controls the timeout for executors that are holding shuffle data. The default value means that Spark will rely on the shuffles being garbage collected to be able to release executors. If for some reason garbage collection is not cleaning up shuffles quickly enough, this option can be used to control when to time out executors even when they are storing shuffle data. 
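Illustrative note (not part of the patch): a rough model of how the dynamic-allocation entries above interact. The executorAllocationRatio scales the fully parallel executor target, which is then clamped to [minExecutors, maxExecutors]; this is a simplification for intuition, not Spark's actual ExecutorAllocationManager code.

    import math

    def target_executors(pending_tasks: int, tasks_per_executor: int,
                         ratio: float = 1.0, min_execs: int = 0,
                         max_execs: float = float("inf")) -> int:
        full_parallelism = math.ceil(pending_tasks / tasks_per_executor)
        target = math.ceil(full_parallelism * ratio)
        return int(max(min_execs, min(max_execs, target)))

    # 1000 pending tasks, 4 task slots per executor, executorAllocationRatio 0.5
    print(target_executors(1000, 4, ratio=0.5))   # 125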
+spark.{driver|executor}.rpc.io.serverThreads Fall back on spark.rpc.io.serverThreads Number of threads used in the server thread pool +spark.{driver|executor}.rpc.io.clientThreads Fall back on spark.rpc.io.clientThreads Number of threads used in the client thread pool +spark.{driver|executor}.rpc.netty.dispatcher.numThreads Fall back on spark.rpc.netty.dispatcher.numThreads Number of threads used in RPC message dispatcher thread pool +spark.streaming.backpressure.enabled false Enables or disables Spark Streaming's internal backpressure mechanism (since 1.5). This enables the Spark Streaming to control the receiving rate based on the current batch scheduling delays and processing times so that the system receives only as fast as the system can process. Internally, this dynamically sets the maximum receiving rate of receivers. This rate is upper bounded by the values spark.streaming.receiver.maxRate and spark.streaming.kafka.maxRatePerPartition if they are set (see below). +spark.streaming.backpressure.initialRate not set This is the initial maximum receiving rate at which each receiver will receive data for the first batch when the backpressure mechanism is enabled. +spark.streaming.blockInterval 200ms Interval at which data received by Spark Streaming receivers is chunked into blocks of data before storing them in Spark. Minimum recommended - 50 ms. See the performance tuning section in the Spark Streaming programming guide for more details. +spark.streaming.receiver.maxRate not set Maximum rate (number of records per second) at which each receiver will receive data. Effectively, each stream will consume at most this number of records per second. Setting this configuration to 0 or a negative number will put no limit on the rate. See the deployment guide in the Spark Streaming programming guide for mode details. +spark.streaming.receiver.writeAheadLog.enable false Enable write-ahead logs for receivers. All the input data received through receivers will be saved to write-ahead logs that will allow it to be recovered after driver failures. See the deployment guide in the Spark Streaming programming guide for more details. +spark.streaming.unpersist true Force RDDs generated and persisted by Spark Streaming to be automatically unpersisted from Spark's memory. The raw input data received by Spark Streaming is also automatically cleared. Setting this to false will allow the raw data and persisted RDDs to be accessible outside the streaming application as they will not be cleared automatically. But it comes at the cost of higher memory usage in Spark. +spark.streaming.stopGracefullyOnShutdown false If true, Spark shuts down the StreamingContext gracefully on JVM shutdown rather than immediately. +spark.streaming.kafka.maxRatePerPartition not set Maximum rate (number of records per second) at which data will be read from each Kafka partition when using the new Kafka direct stream API. See the Kafka Integration guide for more details. +spark.streaming.kafka.minRatePerPartition 1 Minimum rate (number of records per second) at which data will be read from each Kafka partition when using the new Kafka direct stream API. +spark.streaming.ui.retainedBatches 1000 How many batches the Spark Streaming UI and status APIs remember before garbage collecting. +spark.streaming.driver.writeAheadLog.closeFileAfterWrite false Whether to close the file after writing a write-ahead log record on the driver. 
Set this to 'true' when you want to use S3 (or any file system that does not support flushing) for the metadata WAL on the driver. +spark.streaming.receiver.writeAheadLog.closeFileAfterWrite false Whether to close the file after writing a write-ahead log record on the receivers. Set this to 'true' when you want to use S3 (or any file system that does not support flushing) for the data WAL on the receivers. +spark.r.numRBackendThreads 2 Number of threads used by RBackend to handle RPC calls from SparkR package. +spark.r.command Rscript Executable for executing R scripts in cluster modes for both driver and workers. +spark.r.driver.command spark.r.command Executable for executing R scripts in client modes for driver. Ignored in cluster modes. +spark.r.shell.command R Executable for executing sparkR shell in client modes for driver. Ignored in cluster modes. It is the same as environment variable SPARKR_DRIVER_R, but take precedence over it. spark.r.shell.command is used for sparkR shell while spark.r.driver.command is used for running R script. +spark.r.backendConnectionTimeout 6000 Connection timeout set by R process on its connection to RBackend in seconds. +spark.r.heartBeatInterval 100 Interval for heartbeats sent from SparkR backend to R process to prevent connection timeout. +spark.graphx.pregel.checkpointInterval -1 Checkpoint interval for graph and message in Pregel. It used to avoid stackOverflowError due to long lineage chains after lots of iterations. The checkpoint is disabled by default. +spark.deploy.recoveryMode NONE The recovery mode setting to recover submitted Spark jobs with cluster mode when it failed and relaunches. This is only applicable for cluster mode when running with Standalone or Mesos. +spark.deploy.zookeeper.url None When `spark.deploy.recoveryMode` is set to ZOOKEEPER, this configuration is used to set the zookeeper URL to connect to. +spark.deploy.zookeeper.dir None When `spark.deploy.recoveryMode` is set to ZOOKEEPER, this configuration is used to set the zookeeper directory to store recovery state. +spark.shuffle.push.server.mergedShuffleFileManagerImpl org.apache.spark.network.shuffle.NoOpMergedShuffleFileManager Class name of the implementation of MergedShuffleFileManager that manages push-based shuffle. This acts as a server side config to disable or enable push-based shuffle. By default, push-based shuffle is disabled at the server side. To enable push-based shuffle on the server side, set this config to org.apache.spark.network.shuffle.RemoteBlockPushResolver +spark.shuffle.push.server.minChunkSizeInMergedShuffleFile 2m The minimum size of a chunk when dividing a merged shuffle file into multiple chunks during push-based shuffle. A merged shuffle file consists of multiple small shuffle blocks. Fetching the complete merged shuffle file in a single disk I/O increases the memory requirements for both the clients and the external shuffle services. Instead, the external shuffle service serves the merged file in MB-sized chunks. This configuration controls how big a chunk can get. A corresponding index file for each merged shuffle file will be generated indicating chunk boundaries. Setting this too high would increase the memory requirements on both the clients and the external shuffle service. Setting this too low would increase the overall number of RPC requests to external shuffle service unnecessarily. 
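Illustrative note (not part of the patch): the two merger thresholds above combine as max(mergersMinStaticThreshold, mergersMinThresholdRatio * shuffle partitions). The short Python sketch below reproduces the documented examples; the function name is illustrative.

    import math

    def required_mergers(num_partitions: int, ratio: float = 0.05,
                         static_threshold: int = 5) -> int:
        return max(static_threshold, math.ceil(num_partitions * ratio))

    print(required_mergers(100))    # 5  (100-partition example above)
    print(required_mergers(1000))   # 50 (1000-partition example above)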
+spark.shuffle.push.server.mergedIndexCacheSize 100m The maximum size of cache in memory which could be used in push-based shuffle for storing merged index files. This cache is in addition to the one configured via spark.shuffle.service.index.cache.size. +spark.shuffle.push.enabled false Set to true to enable push-based shuffle on the client side and works in conjunction with the server side flag spark.shuffle.push.server.mergedShuffleFileManagerImpl. +spark.shuffle.push.finalize.timeout 10s The amount of time driver waits in seconds, after all mappers have finished for a given shuffle map stage, before it sends merge finalize requests to remote external shuffle services. This gives the external shuffle services extra time to merge blocks. Setting this too long could potentially lead to performance regression. +spark.shuffle.push.maxRetainedMergerLocations 500 Maximum number of merger locations cached for push-based shuffle. Currently, merger locations are hosts of external shuffle services responsible for handling pushed blocks, merging them and serving merged blocks for later shuffle fetch. +spark.shuffle.push.mergersMinThresholdRatio 0.05 Ratio used to compute the minimum number of shuffle merger locations required for a stage based on the number of partitions for the reducer stage. For example, a reduce stage which has 100 partitions and uses the default value 0.05 requires at least 5 unique merger locations to enable push-based shuffle. +spark.shuffle.push.mergersMinStaticThreshold 5 The static threshold for number of shuffle push merger locations should be available in order to enable push-based shuffle for a stage. Note this config works in conjunction with spark.shuffle.push.mergersMinThresholdRatio. Maximum of spark.shuffle.push.mergersMinStaticThreshold and spark.shuffle.push.mergersMinThresholdRatio ratio number of mergers needed to enable push-based shuffle for a stage. For example: with 1000 partitions for the child stage with spark.shuffle.push.mergersMinStaticThreshold as 5 and spark.shuffle.push.mergersMinThresholdRatio set to 0.05, we would need at least 50 mergers to enable push-based shuffle for that stage. +spark.shuffle.push.numPushThreads (none) Specify the number of threads in the block pusher pool. These threads assist in creating connections and pushing blocks to remote external shuffle services. By default, the threadpool size is equal to the number of spark executor cores. +spark.shuffle.push.maxBlockSizeToPush 1m The max size of an individual block to push to the remote external shuffle services. Blocks larger than this threshold are not pushed to be merged remotely. These shuffle blocks will be fetched in the original manner.Setting this too high would result in more blocks to be pushed to remote external shuffle services but those are already efficiently fetched with the existing mechanisms resulting in additional overhead of pushing the large blocks to remote external shuffle services. It is recommended to set spark.shuffle.push.maxBlockSizeToPush lesser than spark.shuffle.push.maxBlockBatchSize config's value.Setting this too low would result in lesser number of blocks getting merged and directly fetched from mapper external shuffle service results in higher small random reads affecting overall disk I/O performance. +spark.shuffle.push.maxBlockBatchSize 3m The max size of a batch of shuffle blocks to be grouped into a single push request. 
Default is set to 3m in order to keep it slightly higher than spark.storage.memoryMapThreshold default which is 2m as it is very likely that each batch of block gets memory mapped which incurs higher overhead. +spark.shuffle.push.merge.finalizeThreads 8 Number of threads used by driver to finalize shuffle merge. Since it could potentially take seconds for a large shuffle to finalize, having multiple threads helps driver to handle concurrent shuffle merge finalize requests when push-based shuffle is enabled. +spark.shuffle.push.minShuffleSizeToWait 500m Driver will wait for merge finalization to complete only if total shuffle data size is more than this threshold. If total shuffle size is less, driver will immediately finalize the shuffle output. +spark.shuffle.push.minCompletedPushRatio 1.0 Fraction of minimum map partitions that should be push complete before driver starts shuffle merge finalization during push based shuffle. diff --git a/core/generate_ctest/ctest_mapping/ctests-spark-core.json b/core/generate_ctest/ctest_mapping/ctests-spark-core.json new file mode 100644 index 00000000..9a0e4044 --- /dev/null +++ b/core/generate_ctest/ctest_mapping/ctests-spark-core.json @@ -0,0 +1 @@ +{"spark.broadcast.UDFCompressionThreshold": [], "spark.checkpoint.compress": ["org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint"], "spark.cleaner.referenceTracking.cleanCheckpoints": ["org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", 
"org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]"], "spark.driver.log.allowErasureCoding": ["org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs"], "spark.driver.userClassPathFirst": ["org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties"], "spark.eventLog.dir": ["org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log"], "spark.eventLog.logBlockUpdates.enabled": ["org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log"], "spark.eventLog.logStageExecutorMetrics": ["org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log"], "spark.eventLog.rolling.enabled": [], "spark.eventLog.rolling.maxFileSize": ["org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lzf)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lzf)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit"], "spark.excludeOnFailure.application.maxFailedExecutorsPerNode": ["org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates 
correctly on stage failure", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants"], "spark.excludeOnFailure.application.maxFailedTasksPerExecutor": ["org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks"], "spark.excludeOnFailure.killExcludedExecutors": ["org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded 
with only a few failures per stage", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors"], "spark.excludeOnFailure.timeout": ["org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks"], "spark.executor.logs.rolling.enableCompression": ["org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling (compressed)", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling (compressed)", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - cleaning", "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - time-based rolling close stream", "org.apache.spark.util.FileAppenderSuite @ file appender selection", "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling"], "spark.files.overwrite": ["org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile - invalid url", "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced 
schemes", "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile - file doesn't exist", "org.apache.spark.SparkContextSuite @ addFile recursive works", "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", "org.apache.spark.deploy.SparkSubmitSuite @ download one file to local", "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", "org.apache.spark.deploy.SparkSubmitSuite @ error informatively when mainClass isn't set and S3 JAR doesn't exist", "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", "org.apache.spark.deploy.SparkSubmitSuite @ download list of files to local"], "spark.files.useFetchCache": ["org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", "org.apache.spark.SparkContextSuite @ addFile recursive works", "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename"], "spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout": ["org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor"], "spark.shuffle.accurateBlockThreshold": ["org.apache.spark.serializer.KryoSerializerSuite @ registration of 
HighlyCompressedMapStatus", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", "org.apache.spark.scheduler.MapStatusSuite @ SPARK-22540: ensure HighlyCompressedMapStatus calculates correct avgSize"], "spark.shuffle.io.maxRetries": ["org.apache.spark.DistributedSuite @ caching (encryption = off)", "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", "org.apache.spark.DistributedSuite @ caching (encryption = on)", "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", "org.apache.spark.rdd.RDDSuite @ takeSample", "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password"], "spark.shuffle.registration.maxAttempts": ["org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working"], "spark.storage.decommission.fallbackStorage.cleanUp": ["org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FallbackStorageSuite @ SPARK-34142: fallback storage API - cleanUp", "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage"], 
"spark.storage.decommission.rddBlocks.enabled": ["org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward", "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no migrations configured", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit"], "spark.storage.decommission.shuffleBlocks.enabled": ["org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no migrations configured", "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks"], 
"spark.storage.decommission.shuffleBlocks.maxThreads": ["org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward"]} \ No newline at end of file diff --git a/core/generate_ctest/inject.py b/core/generate_ctest/inject.py index 75f5b443..fa2aa178 100644 --- a/core/generate_ctest/inject.py +++ b/core/generate_ctest/inject.py @@ -1,5 +1,6 @@ """inject parameter, values into sw config""" +import shutil import sys import xml.etree.ElementTree as ET @@ -35,6 +36,28 @@ def inject_config(param_value_pairs): file.write(str.encode("\n\n")) file.write(ET.tostring(conf)) file.close() + elif project in [SPARK]: + for inject_path in INJECTION_PATH[project]: + back_up = inject_path + "/back_up.xml" + inject_path = inject_path + "/pom.xml" + shutil.copyfile(inject_path, back_up) + print(">>>>[ctest_core] injecting into file: {}".format(inject_path)) + tree = ET.parse(inject_path) + pom = tree.getroot() + namespace = pom.tag.split('{')[1].split('}')[0] + # for reading + namespace_mapping = {'mvnns': namespace} + # for writing: otherwise 'xmlns:ns0' will be used instead of the standard xml namespace 'xmlns' + ET.register_namespace('', namespace) + ns = "{http://maven.apache.org/POM/4.0.0}" + for child in pom.findall("%sbuild/%spluginManagement/%splugins/%splugin" % (ns, ns, ns, ns)): + gid = child.find("%sgroupId" % ns) + if gid.text == "org.scalatest": + child = child.find("%sconfiguration/%ssystemProperties" % (ns, ns)) + for p, v in param_value_pairs.items(): + sub = ET.SubElement(child, '%s%s' % (ns, p)) + sub.text = v + tree.write(inject_path, encoding='utf-8') else: sys.exit(">>>>[ctest_core] value injection for {} is not supported yet".format(project)) @@ -53,5 +76,10 @@ def clean_conf_file(project): file.write(str.encode("\n\n")) file.write(ET.tostring(conf)) file.close() + elif project in [SPARK]: + for inject_path in INJECTION_PATH[project]: + back_up = inject_path + "/back_up.xml" + inject_path = inject_path + "/pom.xml" + shutil.copyfile(back_up, inject_path) else: sys.exit(">>>>[ctest_core] 
value injection for {} is not supported yet".format(project)) diff --git a/core/generate_ctest/main.py b/core/generate_ctest/main.py index a53169d6..4088e468 100644 --- a/core/generate_ctest/main.py +++ b/core/generate_ctest/main.py @@ -31,9 +31,8 @@ def test_value_pair(test_input): for param, values in test_input.items(): tr_file = open(os.path.join(GENCTEST_TR_DIR, project, TR_FILE.format(id=param)), "w") mt_file = open(os.path.join(GENCTEST_TR_DIR, project, MT_FILE.format(id=param)), "w") - associated_tests = mapping[param] if param in mapping else [] - if len(mapping[param]) != 0: + if len(associated_tests) != 0: for value in values: tr = run_test_seperate(param, value, associated_tests) diff --git a/core/generate_ctest/program_input.py b/core/generate_ctest/program_input.py index 9147868a..7a21f25a 100644 --- a/core/generate_ctest/program_input.py +++ b/core/generate_ctest/program_input.py @@ -4,11 +4,11 @@ # run mode "run_mode": "generate_ctest", # string # name of the project, i.e. hadoop-common, hadoop-hdfs, see constant.py - "project": "hadoop-common", # string + "project": "spark-core", # string # path to param -> tests json mapping - "mapping_path": "../../data/ctest_mapping/opensource-hadoop-common.json", # string + "mapping_path": "../../data/ctest_mapping/opensource-spark-core.json", # string # good values of params tests will be run against - "param_value_tsv": "sample-hadoop-common.tsv", # string + "param_value_tsv": "spark-core-generated-values.tsv", # string # display the terminal output live, without saving any results "display_mode": False, # bool # whether to use mvn test or mvn surefire:test diff --git a/core/generate_ctest/run_test.py b/core/generate_ctest/run_test.py index 63847227..417157ed 100644 --- a/core/generate_ctest/run_test.py +++ b/core/generate_ctest/run_test.py @@ -25,7 +25,7 @@ def run_test_seperate(param, value, associated_tests): print(">>>>[ctest_core] chdir to {}".format(testing_dir)) start_time = time.time() for test in associated_tests: - cmd = run_test_utils.maven_cmd(test) + cmd = run_test_utils.maven_cmd(test, project=project) if display_mode: os.system(" ".join(cmd)) continue @@ -40,7 +40,10 @@ def run_test_seperate(param, value, associated_tests): # test hanged, treated as failure. 
process.kill() print(">>>>[ctest_core] maven cmd timeout {}".format(e)) - clsname, testname = test.split("#") + if project in [SPARK]: + clsname, testname = test.split(" @ ") + else: + clsname, testname = test.split("#") tr.ran_tests_and_time.add(test + "\t" + str(cmd_timeout)) tr.failed_tests.add(test) continue @@ -49,7 +52,10 @@ def run_test_seperate(param, value, associated_tests): print_output = run_test_utils.strip_ansi(stdout.decode("ascii", "ignore")) print(print_output) - clsname, testname = test.split("#") + if project in [SPARK]: + clsname, testname = test.split(" @ ") + else: + clsname, testname = test.split("#") times, errors = parse_surefire(clsname, [testname]) if testname in times: tr.ran_tests_and_time.add(test + "\t" + times[testname]) diff --git a/core/generate_ctest/run_test_utils.py b/core/generate_ctest/run_test_utils.py index 6ac063c8..e51bb7da 100644 --- a/core/generate_ctest/run_test_utils.py +++ b/core/generate_ctest/run_test_utils.py @@ -14,11 +14,16 @@ def __init__(self, ran_tests_and_time=set(), failed_tests=set()): self.ran_tests_and_time = ran_tests_and_time -def maven_cmd(test, add_time=False): +def maven_cmd(test, add_time=False, project=None): # surefire:test reuses test build from last compilation # if you modified the test and want to rerun it, you must use `mvn test` - test_mode = "surefire:test" if use_surefire else "test" - cmd = ["mvn", test_mode, "-Dtest={}".format(test)] + maven_args + cmd = None + if project == SPARK: + test_mode = "scalatest:test" if use_surefire else "test" + cmd = ["mvn", test_mode, "-Dtest=none", "-Dsuites=" + test] + maven_args + else: + test_mode = "surefire:test" if use_surefire else "test" + cmd = ["mvn", test_mode, "-Dtest={}".format(test)] + maven_args if add_time: cmd = ["time"] + cmd print(">>>>[ctest_core] command: " + " ".join(cmd)) diff --git a/core/generate_ctest/spark-core-generated-values.tsv b/core/generate_ctest/spark-core-generated-values.tsv new file mode 100644 index 00000000..52d6de9b --- /dev/null +++ b/core/generate_ctest/spark-core-generated-values.tsv @@ -0,0 +1,30 @@ +spark.driver.log.allowErasureCoding true SKIP +spark.driver.userClassPathFirst true SKIP +spark.executor.logs.rolling.enableCompression true SKIP +spark.shuffle.io.maxRetries 1 6 +spark.shuffle.accurateBlockThreshold T OOM +spark.shuffle.registration.maxAttempts 1 6 +spark.driver.userClassPathFirst true SKIP +spark.executor.logs.rolling.enableCompression true SKIP +spark.shuffle.io.maxRetries 1 6 +spark.shuffle.accurateBlockThreshold T OOM +spark.shuffle.registration.maxAttempts 1 6 +spark.eventLog.logBlockUpdates.enabled true SKIP +spark.eventLog.dir /valid/dir1 /valid/dir2 +spark.eventLog.rolling.enabled true SKIP +spark.eventLog.rolling.maxFileSize 1m 256m +spark.checkpoint.compress true SKIP +spark.cleaner.referenceTracking.cleanCheckpoints true SKIP +spark.broadcast.UDFCompressionThreshold RDD SKIP +spark.files.useFetchCache false SKIP +spark.files.overwrite true SKIP +spark.storage.decommission.shuffleBlocks.enabled false SKIP +spark.storage.decommission.shuffleBlocks.maxThreads 1 16 +spark.storage.decommission.rddBlocks.enabled false SKIP +spark.storage.decommission.fallbackStorage.cleanUp true SKIP +spark.eventLog.logStageExecutorMetrics true SKIP +spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout 1s 240s +spark.excludeOnFailure.timeout 10h 2h +spark.excludeOnFailure.application.maxFailedTasksPerExecutor 1 4 +spark.excludeOnFailure.application.maxFailedExecutorsPerNode 1 4 +spark.excludeOnFailure.killExcludedExecutors 
true SKIP diff --git a/core/generate_value/spark-core-generated-values.tsv b/core/generate_value/spark-core-generated-values.tsv new file mode 100644 index 00000000..1015dbb2 --- /dev/null +++ b/core/generate_value/spark-core-generated-values.tsv @@ -0,0 +1,361 @@ +spark.app.name SKIP SKIP +spark.driver.cores 0 2 +spark.driver.maxResultSize SKIP SKIP +spark.driver.memory SKIP SKIP +spark.driver.memoryOverheadFactor 0.05 0.2 +spark.driver.resource.{resourceName}.amount 1 -1 +spark.driver.resource.{resourceName}.discoveryScript SKIP SKIP +spark.driver.resource.{resourceName}.vendor SKIP SKIP +spark.resources.discoveryPlugin SKIP SKIP +spark.executor.memory SKIP SKIP +spark.executor.pyspark.memory SKIP SKIP +spark.executor.memoryOverheadFactor 0.05 0.2 +spark.executor.resource.{resourceName}.amount 1 -1 +spark.executor.resource.{resourceName}.discoveryScript SKIP SKIP +spark.executor.resource.{resourceName}.vendor SKIP SKIP +spark.extraListeners SKIP SKIP +spark.local.dir /valid/file1 /valid/file2 +spark.logConf true SKIP +spark.master SKIP SKIP +spark.submit.deployMode SKIP SKIP +spark.log.callerContext SKIP SKIP +spark.driver.supervise true SKIP +spark.driver.log.dfsDir SKIP SKIP +spark.driver.log.persistToDfs.enabled true SKIP +spark.driver.log.layout SKIP SKIP +spark.driver.log.allowErasureCoding true SKIP +spark.driver.extraClassPath SKIP SKIP +spark.driver.defaultJavaOptions SKIP SKIP +spark.driver.extraJavaOptions SKIP SKIP +spark.driver.extraLibraryPath SKIP SKIP +spark.driver.userClassPathFirst true SKIP +spark.executor.extraClassPath SKIP SKIP +spark.executor.defaultJavaOptions SKIP SKIP +spark.executor.extraJavaOptions SKIP SKIP +spark.executor.extraLibraryPath SKIP SKIP +spark.executor.logs.rolling.maxRetainedFiles SKIP SKIP +spark.executor.logs.rolling.enableCompression true SKIP +spark.executor.logs.rolling.maxSize SKIP SKIP +spark.executor.logs.rolling.strategy SKIP SKIP +spark.executor.logs.rolling.time.interval hourly minutely +spark.executor.userClassPathFirst true SKIP +spark.executorEnv.[EnvironmentVariableName] SKIP SKIP +spark.redaction.regex SKIP SKIP +spark.python.profile true SKIP +spark.python.profile.dump SKIP SKIP +spark.python.worker.memory 1m 1024m +spark.python.worker.reuse false SKIP +spark.files SKIP SKIP +spark.submit.pyFiles SKIP SKIP +spark.jars SKIP SKIP +spark.jars.packages SKIP SKIP +spark.jars.excludes SKIP SKIP +spark.jars.ivy SKIP SKIP +spark.jars.ivySettings SKIP SKIP +spark.jars.repositories SKIP SKIP +spark.archives SKIP SKIP +spark.pyspark.driver.python SKIP SKIP +spark.pyspark.python SKIP SKIP +spark.reducer.maxSizeInFlight 1m 96m +spark.reducer.maxReqsInFlight SKIP SKIP +spark.reducer.maxBlocksInFlightPerAddress SKIP SKIP +spark.shuffle.compress false SKIP +spark.shuffle.file.buffer SKIP SKIP +spark.shuffle.io.maxRetries 1 6 +spark.shuffle.io.numConnectionsPerPeer 0 2 +spark.shuffle.io.preferDirectBufs false SKIP +spark.shuffle.io.retryWait 1s 10s +spark.shuffle.io.backLog 0 -2 +spark.shuffle.io.connectionTimeout SKIP SKIP +spark.shuffle.service.enabled true SKIP +spark.shuffle.service.port 3000 3001 +spark.shuffle.service.index.cache.size 1m 200m +spark.shuffle.service.removeShuffle true SKIP +spark.shuffle.maxChunksBeingTransferred SKIP SKIP +spark.shuffle.sort.bypassMergeThreshold 100 400 +spark.shuffle.spill.compress false SKIP +spark.shuffle.accurateBlockThreshold T OOM +spark.shuffle.registration.timeout 2500 10000 +spark.shuffle.registration.maxAttempts 1 6 +spark.files.io.connectionTimeout SKIP SKIP +spark.shuffle.checksum.enabled false 
SKIP +spark.shuffle.checksum.algorithm JDK CRC32 +spark.shuffle.service.fetch.rdd.enabled true SKIP +spark.driver.extraClassPath SKIP SKIP +spark.driver.defaultJavaOptions SKIP SKIP +spark.driver.extraJavaOptions SKIP SKIP +spark.driver.extraLibraryPath SKIP SKIP +spark.driver.userClassPathFirst true SKIP +spark.executor.extraClassPath SKIP SKIP +spark.executor.defaultJavaOptions SKIP SKIP +spark.executor.extraJavaOptions SKIP SKIP +spark.executor.extraLibraryPath SKIP SKIP +spark.executor.logs.rolling.maxRetainedFiles SKIP SKIP +spark.executor.logs.rolling.enableCompression true SKIP +spark.executor.logs.rolling.maxSize SKIP SKIP +spark.executor.logs.rolling.strategy SKIP SKIP +spark.executor.logs.rolling.time.interval hourly minutely +spark.executor.userClassPathFirst true SKIP +spark.executorEnv.[EnvironmentVariableName] SKIP SKIP +spark.redaction.regex SKIP SKIP +spark.redaction.string.regex SKIP SKIP +spark.python.profile true SKIP +spark.python.profile.dump SKIP SKIP +spark.python.worker.memory 1m 1024m +spark.python.worker.reuse false SKIP +spark.files SKIP SKIP +spark.submit.pyFiles SKIP SKIP +spark.jars SKIP SKIP +spark.jars.packages SKIP SKIP +spark.jars.excludes SKIP SKIP +spark.jars.ivy SKIP SKIP +spark.jars.ivySettings SKIP SKIP +spark.jars.repositories SKIP SKIP +spark.archives SKIP SKIP +spark.pyspark.driver.python SKIP SKIP +spark.pyspark.python SKIP SKIP +spark.reducer.maxSizeInFlight 1m 96m +spark.reducer.maxReqsInFlight SKIP SKIP +spark.reducer.maxBlocksInFlightPerAddress SKIP SKIP +spark.shuffle.compress false SKIP +spark.shuffle.file.buffer SKIP SKIP +spark.shuffle.unsafe.file.output.buffer SKIP SKIP +spark.shuffle.spill.diskWriteBufferSize SKIP SKIP +spark.shuffle.io.maxRetries 1 6 +spark.shuffle.io.numConnectionsPerPeer 0 2 +spark.shuffle.io.preferDirectBufs false SKIP +spark.shuffle.io.retryWait 1s 10s +spark.shuffle.io.backLog 0 -2 +spark.shuffle.io.connectionTimeout SKIP SKIP +spark.shuffle.service.enabled true SKIP +spark.shuffle.service.port 3000 3001 +spark.shuffle.service.name true SKIP +spark.shuffle.service.index.cache.size 1m 200m +spark.shuffle.service.removeShuffle true SKIP +spark.shuffle.maxChunksBeingTransferred SKIP SKIP +spark.shuffle.sort.bypassMergeThreshold 100 400 +spark.shuffle.sort.io.plugin.class SKIP SKIP +spark.shuffle.spill.compress false SKIP +spark.shuffle.accurateBlockThreshold T OOM +spark.shuffle.registration.timeout 2500 10000 +spark.shuffle.registration.maxAttempts 1 6 +spark.shuffle.reduceLocality.enabled false SKIP +spark.shuffle.mapOutput.minSizeForBroadcast SKIP SKIP +spark.shuffle.detectCorrupt false SKIP +spark.shuffle.detectCorrupt.useExtraMemory true SKIP +spark.shuffle.useOldFetchProtocol true SKIP +spark.shuffle.readHostLocalDisk false SKIP +spark.files.io.connectionTimeout SKIP SKIP +spark.shuffle.checksum.enabled false SKIP +spark.shuffle.checksum.algorithm JDK CRC32 +spark.shuffle.service.fetch.rdd.enabled true SKIP +spark.shuffle.service.db.enabled false SKIP +spark.shuffle.service.db.backend ROCKSDB SKIP +spark.eventLog.logBlockUpdates.enabled true SKIP +spark.eventLog.longForm.enabled true SKIP +spark.eventLog.compress true SKIP +spark.eventLog.compression.codec SKIP SKIP +spark.eventLog.erasureCoding.enabled true SKIP +spark.eventLog.dir /valid/dir1 /valid/dir2 +spark.eventLog.enabled true SKIP +spark.eventLog.overwrite true SKIP +spark.eventLog.buffer.kb SKIP SKIP +spark.eventLog.rolling.enabled true SKIP +spark.eventLog.rolling.maxFileSize 1m 256m +spark.ui.dagGraph.retainedRootRDDs SKIP SKIP +spark.ui.enabled 
false SKIP +spark.ui.killEnabled false SKIP +spark.ui.liveUpdate.period 1ms 200ms +spark.ui.liveUpdate.minFlushPeriod 10s 2s +spark.ui.port 3000 3001 +spark.ui.retainedJobs 500 2000 +spark.ui.retainedStages 500 2000 +spark.ui.retainedTasks 50000 200000 +spark.ui.reverseProxy true SKIP +spark.ui.reverseProxyUrl SKIP SKIP +spark.ui.proxyRedirectUri SKIP SKIP +spark.ui.showConsoleProgress true SKIP +spark.ui.custom.executor.log.url SKIP SKIP +spark.worker.ui.retainedExecutors 500 2000 +spark.worker.ui.retainedDrivers 500 2000 +spark.sql.ui.retainedExecutions 500 2000 +spark.streaming.ui.retainedBatches 500 2000 +spark.ui.retainedDeadExecutors 50 200 +spark.ui.filters SKIP SKIP +spark.ui.requestHeaderSize SKIP SKIP +spark.ui.timelineEnabled false SKIP +spark.ui.timeline.executors.maximum 125 500 +spark.ui.timeline.jobs.maximum 250 1000 +spark.ui.timeline.stages.maximum 250 1000 +spark.ui.timeline.tasks.maximum 500 2000 +spark.appStatusStore.diskStoreDir SKIP SKIP +spark.broadcast.compress false SKIP +spark.checkpoint.compress true SKIP +spark.io.compression.codec SKIP SKIP +spark.io.compression.lz4.blockSize SKIP SKIP +spark.io.compression.snappy.blockSize SKIP SKIP +spark.io.compression.zstd.level 0 2 +spark.io.compression.zstd.bufferSize SKIP SKIP +spark.kryo.classesToRegister SKIP SKIP +spark.kryo.referenceTracking false SKIP +spark.kryo.registrationRequired true SKIP +spark.kryo.registrator SKIP SKIP +spark.kryo.unsafe true SKIP +spark.kryoserializer.buffer.max 1m 128m +spark.kryoserializer.buffer SKIP SKIP +spark.rdd.compress true SKIP +spark.serializer SKIP SKIP +spark.serializer.objectStreamReset 50 200 +spark.memory.fraction 0.3 1.2 +spark.memory.storageFraction 0.25 1.0 +spark.memory.offHeap.enabled true SKIP +spark.memory.offHeap.size 1 -1 +spark.storage.unrollMemoryThreshold SKIP SKIP +spark.storage.replication.proactive true SKIP +spark.storage.localDiskByExecutors.cacheSize 500 2000 +spark.cleaner.periodicGC.interval 1min 60min +spark.cleaner.referenceTracking false SKIP +spark.cleaner.referenceTracking.blocking false SKIP +spark.cleaner.referenceTracking.blocking.shuffle true SKIP +spark.cleaner.referenceTracking.cleanCheckpoints true SKIP +spark.broadcast.blockSize 1m 8m +spark.broadcast.checksum false SKIP +spark.broadcast.UDFCompressionThreshold RDD SKIP +spark.executor.heartbeatInterval 1s 20s +spark.files.fetchTimeout 1s 120s +spark.files.useFetchCache false SKIP +spark.files.overwrite true SKIP +spark.files.ignoreCorruptFiles true SKIP +spark.files.ignoreMissingFiles true SKIP +spark.files.maxPartitionBytes SKIP SKIP +spark.files.openCostInBytes SKIP SKIP +spark.hadoop.cloneConf true SKIP +spark.hadoop.validateOutputSpecs false SKIP +spark.storage.memoryMapThreshold 1m 4m +spark.storage.decommission.enabled true SKIP +spark.storage.decommission.shuffleBlocks.enabled false SKIP +spark.storage.decommission.shuffleBlocks.maxThreads 1 16 +spark.storage.decommission.rddBlocks.enabled false SKIP +spark.storage.decommission.fallbackStorage.path /valid/file1 /valid/file2 +spark.storage.decommission.fallbackStorage.cleanUp true SKIP +spark.storage.decommission.shuffleBlocks.maxDiskSize SKIP SKIP +spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version 0 2 +spark.eventLog.logStageExecutorMetrics true SKIP +spark.executor.processTreeMetrics.enabled true SKIP +spark.executor.metrics.pollingInterval 1 -1 +spark.eventLog.gcMetrics.youngGenerationGarbageCollectors Copy PS Scavenge +spark.eventLog.gcMetrics.oldGenerationGarbageCollectors MarkSweepCompact PS MarkSweep 
+spark.executor.metrics.fileSystemSchemes file hdfs +spark.rpc.message.maxSize 64 256 +spark.blockManager.port SKIP SKIP +spark.driver.blockManager.port SKIP SKIP +spark.driver.bindAddress SKIP SKIP +spark.driver.host SKIP SKIP +spark.driver.port SKIP SKIP +spark.rpc.io.backLog 32 128 +spark.network.timeout 1s 240s +spark.network.io.preferDirectBufs false SKIP +spark.port.maxRetries 8 32 +spark.rpc.askTimeout SKIP SKIP +spark.rpc.lookupTimeout 1s 240s +spark.network.maxRemoteBlockSizeFetchToMem 1m 400m +spark.rpc.io.connectionTimeout SKIP SKIP +spark.cores.max SKIP SKIP +spark.locality.wait 1s 6s +spark.locality.wait.node SKIP SKIP +spark.locality.wait.process SKIP SKIP +spark.locality.wait.rack SKIP SKIP +spark.scheduler.maxRegisteredResourcesWaitingTime 1s 60s +spark.scheduler.minRegisteredResourcesRatio SKIP SKIP +spark.scheduler.mode FAIR SKIP +spark.scheduler.revive.interval 10s 2s +spark.scheduler.listenerbus.eventqueue.capacity 5000 20000 +spark.scheduler.listenerbus.eventqueue.shared.capacity SKIP SKIP +spark.scheduler.listenerbus.eventqueue.appStatus.capacity SKIP SKIP +spark.scheduler.listenerbus.eventqueue.executorManagement.capacity SKIP SKIP +spark.scheduler.listenerbus.eventqueue.eventLog.capacity SKIP SKIP +spark.scheduler.listenerbus.eventqueue.streams.capacity SKIP SKIP +spark.scheduler.resource.profileMergeConflicts true SKIP +spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout 1s 240s +spark.standalone.submit.waitAppCompletion true SKIP +spark.excludeOnFailure.enabled true SKIP +spark.excludeOnFailure.timeout 10h 2h +spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor 0 2 +spark.excludeOnFailure.task.maxTaskAttemptsPerNode 1 4 +spark.excludeOnFailure.stage.maxFailedTasksPerExecutor 1 4 +spark.excludeOnFailure.stage.maxFailedExecutorsPerNode 1 4 +spark.excludeOnFailure.application.maxFailedTasksPerExecutor 1 4 +spark.excludeOnFailure.application.maxFailedExecutorsPerNode 1 4 +spark.excludeOnFailure.killExcludedExecutors true SKIP +spark.excludeOnFailure.application.fetchFailure.enabled true SKIP +spark.speculation true SKIP +spark.speculation.interval 1ms 200ms +spark.speculation.multiplier 0.75 3.0 +spark.speculation.quantile 0.375 1.5 +spark.speculation.minTaskRuntime 1ms 200ms +spark.speculation.task.duration.threshold SKIP SKIP +spark.speculation.efficiency.processRateMultiplier 0.375 1.5 +spark.speculation.efficiency.longRunTaskFactor 1 4 +spark.speculation.efficiency.enabled false SKIP +spark.task.cpus 0 2 +spark.task.resource.{resourceName}.amount 0 2 +spark.task.maxFailures 1 8 +spark.task.reaper.enabled true SKIP +spark.task.reaper.pollingInterval 1s 20s +spark.task.reaper.threadDump false SKIP +spark.task.reaper.killTimeout 0 -2 +spark.stage.maxConsecutiveAttempts 1 8 +spark.stage.ignoreDecommissionFetchFailure true SKIP +spark.barrier.sync.timeout 1d 730d +spark.scheduler.barrier.maxConcurrentTasksCheck.interval 1s 30s +spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures 20 80 +spark.dynamicAllocation.enabled true SKIP +spark.dynamicAllocation.executorIdleTimeout 1s 120s +spark.dynamicAllocation.cachedExecutorIdleTimeout inf inf +spark.dynamicAllocation.initialExecutors SKIP SKIP +spark.dynamicAllocation.maxExecutors inf inf +spark.dynamicAllocation.minExecutors 1 -1 +spark.dynamicAllocation.executorAllocationRatio 0 2 +spark.dynamicAllocation.schedulerBacklogTimeout 10s 2s +spark.dynamicAllocation.sustainedSchedulerBacklogTimeout SKIP SKIP +spark.dynamicAllocation.shuffleTracking.enabled false SKIP 
+spark.dynamicAllocation.shuffleTracking.timeout inf inf +spark.{driver|executor}.rpc.io.serverThreads SKIP SKIP +spark.{driver|executor}.rpc.io.clientThreads SKIP SKIP +spark.{driver|executor}.rpc.netty.dispatcher.numThreads SKIP SKIP +spark.streaming.backpressure.enabled true SKIP +spark.streaming.backpressure.initialRate SKIP SKIP +spark.streaming.blockInterval 1ms 400ms +spark.streaming.receiver.maxRate SKIP SKIP +spark.streaming.receiver.writeAheadLog.enable true SKIP +spark.streaming.unpersist false SKIP +spark.streaming.stopGracefullyOnShutdown true SKIP +spark.streaming.kafka.maxRatePerPartition SKIP SKIP +spark.streaming.kafka.minRatePerPartition 0 2 +spark.streaming.ui.retainedBatches 500 2000 +spark.streaming.driver.writeAheadLog.closeFileAfterWrite true SKIP +spark.streaming.receiver.writeAheadLog.closeFileAfterWrite true SKIP +spark.r.numRBackendThreads 1 4 +spark.r.command SKIP SKIP +spark.r.driver.command SKIP SKIP +spark.r.shell.command SKIP SKIP +spark.r.backendConnectionTimeout 3000 12000 +spark.r.heartBeatInterval 50 200 +spark.graphx.pregel.checkpointInterval 0 -2 +spark.deploy.recoveryMode SKIP SKIP +spark.deploy.zookeeper.url SKIP SKIP +spark.deploy.zookeeper.dir /valid/dir1 /valid/dir2 +spark.shuffle.push.server.mergedShuffleFileManagerImpl SKIP SKIP +spark.shuffle.push.server.minChunkSizeInMergedShuffleFile 1m 4m +spark.shuffle.push.server.mergedIndexCacheSize 1m 200m +spark.shuffle.push.enabled true SKIP +spark.shuffle.push.finalize.timeout 1s 20s +spark.shuffle.push.maxRetainedMergerLocations 250 1000 +spark.shuffle.push.mergersMinThresholdRatio 0.025 0.1 +spark.shuffle.push.mergersMinStaticThreshold 1 10 +spark.shuffle.push.numPushThreads SKIP SKIP +spark.shuffle.push.maxBlockSizeToPush 10m 2m +spark.shuffle.push.maxBlockBatchSize 1m 6m +spark.shuffle.push.merge.finalizeThreads 1 16 +spark.shuffle.push.minShuffleSizeToWait 1m 1000m +spark.shuffle.push.minCompletedPushRatio 0.5 2.0 diff --git a/core/generate_value/value_generation.py b/core/generate_value/value_generation.py index 82a5bde8..6d1a58ed 100644 --- a/core/generate_value/value_generation.py +++ b/core/generate_value/value_generation.py @@ -25,6 +25,9 @@ def read_tsv(module): if module == "zookeeper-server": assert len(params) == 32 return 32 + elif module == "spark-core": + assert len(params) == 365 + return 365 else: assert len(params) == 90 return 90 diff --git a/core/identify_param/add_project.sh b/core/identify_param/add_project.sh index 954dbab8..0321d41f 100755 --- a/core/identify_param/add_project.sh +++ b/core/identify_param/add_project.sh @@ -47,6 +47,13 @@ function setup_alluxio() { mvn clean install -DskipTests -Dcheckstyle.skip -Dlicense.skip -Dfindbugs.skip -Dmaven.javadoc.skip=true } +function setup_spark() { + [ ! -d "app/ctest-spark" ] && git clone https://github.com/ZHLOLin/spark.git app/ctest-spark + cd app/ctest-spark + git fetch && git checkout ctest_enable_logging + mvn clean install -pl core -am -DskipTests +} + function usage() { echo "Usage: add_project.sh
" exit 1 @@ -63,7 +70,8 @@ function main() { hbase) setup_hbase ;; zookeeper) setup_zookeeper ;; alluxio) setup_alluxio ;; - *) echo "Unexpected project: $project - only support hadoop, hbase, zookeeper and alluxio." ;; + spark) setup_spark ;; + *) echo "Unexpected project: $project - only support hadoop, hbase, zookeeper, spark and alluxio." ;; esac fi } diff --git a/core/identify_param/collector.py b/core/identify_param/collector.py index 4c1eee00..482a1af5 100644 --- a/core/identify_param/collector.py +++ b/core/identify_param/collector.py @@ -20,13 +20,17 @@ def __init__(self, module): self.param_setter_map = {} self.param_unset_getter_map = {} self.params = utils.get_default_params_from_file(self.module) + if module in ["spark-core"]: + self.indent = " param: " + else: + self.indent = " " print("total number of configuration parameters: " + str(len(self.params))) def parse_getter_record_file(self): for line in open(self.getter_record_file).readlines(): line = line.strip("\n") - class_pound_method = line.split(" ")[0] - param = line.split(" ")[1] + class_pound_method = line.split(self.indent)[0] + param = line.split(self.indent)[1] assert param in self.params, "wrong parameter" if param not in self.param_getter_map.keys(): @@ -36,8 +40,8 @@ def parse_getter_record_file(self): def parse_setter_record_file(self): for line in open(self.setter_record_file).readlines(): line = line.strip("\n") - class_pound_method = line.split(" ")[0] - param = line.split(" ")[1] + class_pound_method = line.split(self.indent)[0] + param = line.split(self.indent)[1] assert param in self.params, "wrong parameter" if param not in self.param_setter_map.keys(): diff --git a/core/identify_param/constant.py b/core/identify_param/constant.py index a0b2d067..bed77995 100644 --- a/core/identify_param/constant.py +++ b/core/identify_param/constant.py @@ -7,12 +7,14 @@ CTEST_HBASE_DIR = os.path.join(APP_DIR, "ctest-hbase") CTEST_ZOOKEEPER_DIR = os.path.join(APP_DIR, "ctest-zookeeper") CTEST_ALLUXIO_DIR = os.path.join(APP_DIR, "ctest-alluxio") +CTEST_SPARK_DIR = os.path.join(APP_DIR, "ctest-spark") MODULE_PATH = { "hadoop-common": CTEST_HADOOP_DIR, "hadoop-hdfs": CTEST_HADOOP_DIR, "hbase-server": CTEST_HBASE_DIR, - "alluxio-core": CTEST_ALLUXIO_DIR + "alluxio-core": CTEST_ALLUXIO_DIR, + "spark-core": CTEST_SPARK_DIR } SRC_SUBDIR = { @@ -20,7 +22,8 @@ "hadoop-hdfs": "hadoop-hdfs-project/hadoop-hdfs", "hbase-server": "hbase-server", "zookeeper-server": "zookeeper-server", - "alluxio-core": "core" + "alluxio-core": "core", + "spark-core": "core" } MVN_TEST_PATH = { @@ -29,6 +32,7 @@ "hbase-server": os.path.join(CTEST_HBASE_DIR, SRC_SUBDIR["hbase-server"]), "zookeeper-server": os.path.join(CTEST_ZOOKEEPER_DIR, SRC_SUBDIR["zookeeper-server"]), "alluxio-core": os.path.join(CTEST_ALLUXIO_DIR, SRC_SUBDIR["alluxio-core"]), + "spark-core": os.path.join(CTEST_SPARK_DIR, SRC_SUBDIR["spark-core"]) } LOCAL_CONF_PATH = { @@ -36,7 +40,8 @@ "hadoop-hdfs": "results/hadoop-hdfs/conf_params.txt", "hbase-server": "results/hbase-server/conf_params.txt", "zookeeper-server": "results/zookeeper-server/conf_params.txt", - "alluxio-core": "results/alluxio-core/conf_params.txt" + "alluxio-core": "results/alluxio-core/conf_params.txt", + "spark-core": "results/spark-core/conf_params.txt" } SUREFIRE_SUBDIR = "target/surefire-reports/*" @@ -63,6 +68,9 @@ os.path.join(CTEST_ALLUXIO_DIR, "core/server/proxy", SUREFIRE_SUBDIR), os.path.join(CTEST_ALLUXIO_DIR, "core/server/worker", SUREFIRE_SUBDIR), os.path.join(CTEST_ALLUXIO_DIR, "core/server/master", 
SUREFIRE_SUBDIR) + ], + "spark-core": [ + os.path.join(CTEST_SPARK_DIR, SUREFIRE_SUBDIR) ] } diff --git a/core/identify_param/identify_param.sh b/core/identify_param/identify_param.sh index 76963419..96cb95b8 100755 --- a/core/identify_param/identify_param.sh +++ b/core/identify_param/identify_param.sh @@ -12,9 +12,9 @@ function main() { usage else case $project in - hadoop-common | hadoop-hdfs | hbase-server | zookeeper-server | alluxio-core) python3 runner.py $project; python3 collector.py $project ;; + hadoop-common | hadoop-hdfs | hbase-server | zookeeper-server | alluxio-core | spark-core) python3 runner.py $project; python3 collector.py $project ;; -h | --help) usage ;; - *) echo "Unexpected project: $project - only support hadoop-common, hadoop-hdfs, hbase-server, zookeeper-server and alluxio-core." ;; + *) echo "Unexpected project: $project - only support hadoop-common, hadoop-hdfs, hbase-server, zookeeper-server, alluxio-core and spark-core." ;; esac fi } diff --git a/core/identify_param/results/spark-core/conf_params.txt b/core/identify_param/results/spark-core/conf_params.txt new file mode 100644 index 00000000..57df080e --- /dev/null +++ b/core/identify_param/results/spark-core/conf_params.txt @@ -0,0 +1,365 @@ +spark.app.name +spark.driver.cores +spark.driver.maxResultSize +spark.driver.memory +spark.driver.memoryOverhead +spark.driver.memoryOverheadFactor +spark.driver.resource.{resourceName}.amount +spark.driver.resource.{resourceName}.discoveryScript +spark.driver.resource.{resourceName}.vendor +spark.resources.discoveryPlugin +spark.executor.memory +spark.executor.pyspark.memory +spark.executor.memoryOverhead +spark.executor.memoryOverheadFactor +spark.executor.resource.{resourceName}.amount +spark.executor.resource.{resourceName}.discoveryScript +spark.executor.resource.{resourceName}.vendor +spark.extraListeners +spark.local.dir +spark.logConf +spark.master +spark.submit.deployMode +spark.log.callerContext +spark.driver.supervise +spark.driver.log.dfsDir +spark.driver.log.persistToDfs.enabled +spark.driver.log.layout +spark.driver.log.allowErasureCoding +spark.driver.extraClassPath +spark.driver.defaultJavaOptions +spark.driver.extraJavaOptions +spark.driver.extraLibraryPath +spark.driver.userClassPathFirst +spark.executor.extraClassPath +spark.executor.defaultJavaOptions +spark.executor.extraJavaOptions +spark.executor.extraLibraryPath +spark.executor.logs.rolling.maxRetainedFiles +spark.executor.logs.rolling.enableCompression +spark.executor.logs.rolling.maxSize +spark.executor.logs.rolling.strategy +spark.executor.logs.rolling.time.interval +spark.executor.userClassPathFirst +spark.executorEnv.[EnvironmentVariableName] +spark.redaction.regex +spark.python.profile +spark.python.profile.dump +spark.python.worker.memory +spark.python.worker.reuse +spark.files +spark.submit.pyFiles +spark.jars +spark.jars.packages +spark.jars.excludes +spark.jars.ivy +spark.jars.ivySettings +spark.jars.repositories +spark.archives +spark.pyspark.driver.python +spark.pyspark.python +spark.reducer.maxSizeInFlight +spark.reducer.maxReqsInFlight +spark.reducer.maxBlocksInFlightPerAddress +spark.shuffle.compress +spark.shuffle.file.buffer +spark.shuffle.io.maxRetries +spark.shuffle.io.numConnectionsPerPeer +spark.shuffle.io.preferDirectBufs +spark.shuffle.io.retryWait +spark.shuffle.io.backLog +spark.shuffle.io.connectionTimeout +spark.shuffle.service.enabled +spark.shuffle.service.port +spark.shuffle.service.index.cache.size +spark.shuffle.service.removeShuffle 
+spark.shuffle.maxChunksBeingTransferred +spark.shuffle.sort.bypassMergeThreshold +spark.shuffle.spill.compress +spark.shuffle.accurateBlockThreshold +spark.shuffle.registration.timeout +spark.shuffle.registration.maxAttempts +spark.files.io.connectionTimeout +spark.shuffle.checksum.enabled +spark.shuffle.checksum.algorithm +spark.shuffle.service.fetch.rdd.enabled +spark.driver.extraClassPath +spark.driver.defaultJavaOptions +spark.driver.extraJavaOptions +spark.driver.extraLibraryPath +spark.driver.userClassPathFirst +spark.executor.extraClassPath +spark.executor.defaultJavaOptions +spark.executor.extraJavaOptions +spark.executor.extraLibraryPath +spark.executor.logs.rolling.maxRetainedFiles +spark.executor.logs.rolling.enableCompression +spark.executor.logs.rolling.maxSize +spark.executor.logs.rolling.strategy +spark.executor.logs.rolling.time.interval +spark.executor.userClassPathFirst +spark.executorEnv.[EnvironmentVariableName] +spark.redaction.regex +spark.redaction.string.regex +spark.python.profile +spark.python.profile.dump +spark.python.worker.memory +spark.python.worker.reuse +spark.files +spark.submit.pyFiles +spark.jars +spark.jars.packages +spark.jars.excludes +spark.jars.ivy +spark.jars.ivySettings +spark.jars.repositories +spark.archives +spark.pyspark.driver.python +spark.pyspark.python +spark.reducer.maxSizeInFlight +spark.reducer.maxReqsInFlight +spark.reducer.maxBlocksInFlightPerAddress +spark.shuffle.compress +spark.shuffle.file.buffer +spark.shuffle.unsafe.file.output.buffer +spark.shuffle.spill.diskWriteBufferSize +spark.shuffle.io.maxRetries +spark.shuffle.io.numConnectionsPerPeer +spark.shuffle.io.preferDirectBufs +spark.shuffle.io.retryWait +spark.shuffle.io.backLog +spark.shuffle.io.connectionTimeout +spark.shuffle.service.enabled +spark.shuffle.service.port +spark.shuffle.service.name +spark.shuffle.service.index.cache.size +spark.shuffle.service.removeShuffle +spark.shuffle.maxChunksBeingTransferred +spark.shuffle.sort.bypassMergeThreshold +spark.shuffle.sort.io.plugin.class +spark.shuffle.spill.compress +spark.shuffle.accurateBlockThreshold +spark.shuffle.registration.timeout +spark.shuffle.registration.maxAttempts +spark.shuffle.reduceLocality.enabled +spark.shuffle.mapOutput.minSizeForBroadcast +spark.shuffle.detectCorrupt +spark.shuffle.detectCorrupt.useExtraMemory +spark.shuffle.useOldFetchProtocol +spark.shuffle.readHostLocalDisk +spark.files.io.connectionTimeout +spark.shuffle.checksum.enabled +spark.shuffle.checksum.algorithm +spark.shuffle.service.fetch.rdd.enabled +spark.shuffle.service.db.enabled +spark.shuffle.service.db.backend +spark.eventLog.logBlockUpdates.enabled +spark.eventLog.longForm.enabled +spark.eventLog.compress +spark.eventLog.compression.codec +spark.eventLog.erasureCoding.enabled +spark.eventLog.dir +spark.eventLog.enabled +spark.eventLog.overwrite +spark.eventLog.buffer.kb +spark.eventLog.rolling.enabled +spark.eventLog.rolling.maxFileSize +spark.ui.dagGraph.retainedRootRDDs +spark.ui.enabled +spark.ui.killEnabled +spark.ui.liveUpdate.period +spark.ui.liveUpdate.minFlushPeriod +spark.ui.port +spark.ui.retainedJobs +spark.ui.retainedStages +spark.ui.retainedTasks +spark.ui.reverseProxy +spark.ui.reverseProxyUrl +spark.ui.proxyRedirectUri +spark.ui.showConsoleProgress +spark.ui.custom.executor.log.url +spark.worker.ui.retainedExecutors +spark.worker.ui.retainedDrivers +spark.sql.ui.retainedExecutions +spark.streaming.ui.retainedBatches +spark.ui.retainedDeadExecutors +spark.ui.filters +spark.ui.requestHeaderSize 
+spark.ui.timelineEnabled +spark.ui.timeline.executors.maximum +spark.ui.timeline.jobs.maximum +spark.ui.timeline.stages.maximum +spark.ui.timeline.tasks.maximum +spark.appStatusStore.diskStoreDir +spark.broadcast.compress +spark.checkpoint.compress +spark.io.compression.codec +spark.io.compression.lz4.blockSize +spark.io.compression.snappy.blockSize +spark.io.compression.zstd.level +spark.io.compression.zstd.bufferSize +spark.kryo.classesToRegister +spark.kryo.referenceTracking +spark.kryo.registrationRequired +spark.kryo.registrator +spark.kryo.unsafe +spark.kryoserializer.buffer.max +spark.kryoserializer.buffer +spark.rdd.compress +spark.serializer +spark.serializer.objectStreamReset +spark.memory.fraction +spark.memory.storageFraction +spark.memory.offHeap.enabled +spark.memory.offHeap.size +spark.storage.unrollMemoryThreshold +spark.storage.replication.proactive +spark.storage.localDiskByExecutors.cacheSize +spark.cleaner.periodicGC.interval +spark.cleaner.referenceTracking +spark.cleaner.referenceTracking.blocking +spark.cleaner.referenceTracking.blocking.shuffle +spark.cleaner.referenceTracking.cleanCheckpoints +spark.broadcast.blockSize +spark.broadcast.checksum +spark.broadcast.UDFCompressionThreshold +spark.executor.cores +spark.default.parallelism +spark.executor.heartbeatInterval +spark.files.fetchTimeout +spark.files.useFetchCache +spark.files.overwrite +spark.files.ignoreCorruptFiles +spark.files.ignoreMissingFiles +spark.files.maxPartitionBytes +spark.files.openCostInBytes +spark.hadoop.cloneConf +spark.hadoop.validateOutputSpecs +spark.storage.memoryMapThreshold +spark.storage.decommission.enabled +spark.storage.decommission.shuffleBlocks.enabled +spark.storage.decommission.shuffleBlocks.maxThreads +spark.storage.decommission.rddBlocks.enabled +spark.storage.decommission.fallbackStorage.path +spark.storage.decommission.fallbackStorage.cleanUp +spark.storage.decommission.shuffleBlocks.maxDiskSize +spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version +spark.eventLog.logStageExecutorMetrics +spark.executor.processTreeMetrics.enabled +spark.executor.metrics.pollingInterval +spark.eventLog.gcMetrics.youngGenerationGarbageCollectors +spark.eventLog.gcMetrics.oldGenerationGarbageCollectors +spark.executor.metrics.fileSystemSchemes +spark.rpc.message.maxSize +spark.blockManager.port +spark.driver.blockManager.port +spark.driver.bindAddress +spark.driver.host +spark.driver.port +spark.rpc.io.backLog +spark.network.timeout +spark.network.io.preferDirectBufs +spark.port.maxRetries +spark.rpc.askTimeout +spark.rpc.lookupTimeout +spark.network.maxRemoteBlockSizeFetchToMem +spark.rpc.io.connectionTimeout +spark.cores.max +spark.locality.wait +spark.locality.wait.node +spark.locality.wait.process +spark.locality.wait.rack +spark.scheduler.maxRegisteredResourcesWaitingTime +spark.scheduler.minRegisteredResourcesRatio +spark.scheduler.mode +spark.scheduler.revive.interval +spark.scheduler.listenerbus.eventqueue.capacity +spark.scheduler.listenerbus.eventqueue.shared.capacity +spark.scheduler.listenerbus.eventqueue.appStatus.capacity +spark.scheduler.listenerbus.eventqueue.executorManagement.capacity +spark.scheduler.listenerbus.eventqueue.eventLog.capacity +spark.scheduler.listenerbus.eventqueue.streams.capacity +spark.scheduler.resource.profileMergeConflicts +spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout +spark.standalone.submit.waitAppCompletion +spark.excludeOnFailure.enabled +spark.excludeOnFailure.timeout +spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor 
+spark.excludeOnFailure.task.maxTaskAttemptsPerNode +spark.excludeOnFailure.stage.maxFailedTasksPerExecutor +spark.excludeOnFailure.stage.maxFailedExecutorsPerNode +spark.excludeOnFailure.application.maxFailedTasksPerExecutor +spark.excludeOnFailure.application.maxFailedExecutorsPerNode +spark.excludeOnFailure.killExcludedExecutors +spark.excludeOnFailure.application.fetchFailure.enabled +spark.speculation +spark.speculation.interval +spark.speculation.multiplier +spark.speculation.quantile +spark.speculation.minTaskRuntime +spark.speculation.task.duration.threshold +spark.speculation.efficiency.processRateMultiplier +spark.speculation.efficiency.longRunTaskFactor +spark.speculation.efficiency.enabled +spark.task.cpus +spark.task.resource.{resourceName}.amount +spark.task.maxFailures +spark.task.reaper.enabled +spark.task.reaper.pollingInterval +spark.task.reaper.threadDump +spark.task.reaper.killTimeout +spark.stage.maxConsecutiveAttempts +spark.stage.ignoreDecommissionFetchFailure +spark.barrier.sync.timeout +spark.scheduler.barrier.maxConcurrentTasksCheck.interval +spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures +spark.dynamicAllocation.enabled +spark.dynamicAllocation.executorIdleTimeout +spark.dynamicAllocation.cachedExecutorIdleTimeout +spark.dynamicAllocation.initialExecutors +spark.dynamicAllocation.maxExecutors +spark.dynamicAllocation.minExecutors +spark.dynamicAllocation.executorAllocationRatio +spark.dynamicAllocation.schedulerBacklogTimeout +spark.dynamicAllocation.sustainedSchedulerBacklogTimeout +spark.dynamicAllocation.shuffleTracking.enabled +spark.dynamicAllocation.shuffleTracking.timeout +spark.{driver|executor}.rpc.io.serverThreads +spark.{driver|executor}.rpc.io.clientThreads +spark.{driver|executor}.rpc.netty.dispatcher.numThreads +spark.streaming.backpressure.enabled +spark.streaming.backpressure.initialRate +spark.streaming.blockInterval +spark.streaming.receiver.maxRate +spark.streaming.receiver.writeAheadLog.enable +spark.streaming.unpersist +spark.streaming.stopGracefullyOnShutdown +spark.streaming.kafka.maxRatePerPartition +spark.streaming.kafka.minRatePerPartition +spark.streaming.ui.retainedBatches +spark.streaming.driver.writeAheadLog.closeFileAfterWrite +spark.streaming.receiver.writeAheadLog.closeFileAfterWrite +spark.r.numRBackendThreads +spark.r.command +spark.r.driver.command +spark.r.shell.command +spark.r.backendConnectionTimeout +spark.r.heartBeatInterval +spark.graphx.pregel.checkpointInterval +spark.deploy.recoveryMode +spark.deploy.zookeeper.url +spark.deploy.zookeeper.dir +spark.shuffle.push.server.mergedShuffleFileManagerImpl +spark.shuffle.push.server.minChunkSizeInMergedShuffleFile +spark.shuffle.push.server.mergedIndexCacheSize +spark.shuffle.push.enabled +spark.shuffle.push.finalize.timeout +spark.shuffle.push.maxRetainedMergerLocations +spark.shuffle.push.mergersMinThresholdRatio +spark.shuffle.push.mergersMinStaticThreshold +spark.shuffle.push.numPushThreads +spark.shuffle.push.maxBlockSizeToPush +spark.shuffle.push.maxBlockBatchSize +spark.shuffle.push.merge.finalizeThreads +spark.shuffle.push.minShuffleSizeToWait +spark.shuffle.push.minCompletedPushRatio \ No newline at end of file diff --git a/core/identify_param/results/spark-core/test_method_list.json b/core/identify_param/results/spark-core/test_method_list.json new file mode 100644 index 00000000..348832a4 --- /dev/null +++ b/core/identify_param/results/spark-core/test_method_list.json @@ -0,0 +1 @@ +["org.apache.spark.AccumulatorSuite @ accumulator serialization", 
"org.apache.spark.AccumulatorSuite @ get accum", "org.apache.spark.api.java.JavaUtilsSuite @ containsKey implementation without iteratively entrySet call", "org.apache.spark.api.java.OptionalSuite @ testAbsentGet", "org.apache.spark.api.java.OptionalSuite @ testOfWithNull", "org.apache.spark.api.java.OptionalSuite @ testEmpty", "org.apache.spark.api.java.OptionalSuite @ testOf", "org.apache.spark.api.java.OptionalSuite @ testFromNullable", "org.apache.spark.api.java.OptionalSuite @ testAbsent", "org.apache.spark.api.java.OptionalSuite @ testOfNullable", "org.apache.spark.api.java.OptionalSuite @ testEmptyGet", "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", "org.apache.spark.api.python.PythonHadoopUtilSuite @ Testing roundtrip conversion of various types", "org.apache.spark.api.python.PythonHadoopUtilSuite @ Testing that BytesWritables convert to arrays of bytes and back", "org.apache.spark.api.python.PythonHadoopUtilSuite @ Testing that MapWritables convert to Maps and back", "org.apache.spark.api.python.PythonRDDSuite @ Writing large strings to the worker", "org.apache.spark.api.python.PythonRDDSuite @ Handle nulls gracefully", "org.apache.spark.api.python.PythonRDDSuite @ python server error handling", "org.apache.spark.api.python.PythonRDDSuite @ mapToConf should not load defaults", "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", "org.apache.spark.api.r.JVMObjectTrackerSuite @ JVMObjectId does not take null IDs", "org.apache.spark.api.r.JVMObjectTrackerSuite @ JVMObjectTracker", "org.apache.spark.api.r.RBackendSuite @ close() clears jvmObjectTracker", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a 
barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", "org.apache.spark.broadcast.BroadcastSuite @ TorrentBroadcast's blockifyObject and unblockifyObject are inverses", "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", 
"org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", "org.apache.spark.ContextCleanerSuite @ cleanup RDD", "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", "org.apache.spark.deploy.ClientSuite @ correctly validates driver jar URL's", "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", "org.apache.spark.deploy.history.ApplicationCacheSuite @ Completed UI get", "org.apache.spark.deploy.history.ApplicationCacheSuite @ Test that if an attempt ID is set, it must be used in lookups", 
"org.apache.spark.deploy.history.ApplicationCacheSuite @ Incomplete apps refreshed", "org.apache.spark.deploy.history.ApplicationCacheSuite @ Large Scale Application Eviction", "org.apache.spark.deploy.history.ApplicationCacheSuite @ Attempts are Evicted", "org.apache.spark.deploy.history.ApplicationCacheSuite @ redirect includes query params", "org.apache.spark.deploy.history.BasicEventFilterBuilderSuite @ track live jobs", "org.apache.spark.deploy.history.BasicEventFilterBuilderSuite @ track live executors", "org.apache.spark.deploy.history.BasicEventFilterSuite @ filter out events for finished jobs", "org.apache.spark.deploy.history.BasicEventFilterSuite @ accept all events for block manager addition/removal on driver", "org.apache.spark.deploy.history.BasicEventFilterSuite @ filter out events for dead executors", "org.apache.spark.deploy.history.BasicEventFilterSuite @ other events should be left to other filters", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ No event log files", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ No compact file, less origin files available than max files to retain", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ No compact file, more origin files available than max files to retain", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ compact file exists, less origin files available than max files to retain", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ compact file exists, number of origin files are same as max files to retain", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ compact file exists, more origin files available than max files to retain", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ events for finished job are dropped in new compact file", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ Don't compact file if score is lower than threshold", "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ rewrite files with test filters", "org.apache.spark.deploy.history.HistoryServerArgumentsSuite @ No Arguments Parsing", "org.apache.spark.deploy.history.HistoryServerArgumentsSuite @ Properties File Arguments Parsing --properties-file", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseLevelDBSuite @ leasing space", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseLevelDBSuite @ tracking active stores", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseLevelDBSuite @ approximate size heuristic", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseLevelDBSuite @ SPARK-32024: update ApplicationStoreInfo.size during initializing", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseLevelDBSuite @ SPARK-38095: appStorePath should use backend extensions", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseRocksDBSuite @ leasing space", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseRocksDBSuite @ tracking active stores", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseRocksDBSuite @ approximate size heuristic", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseRocksDBSuite @ SPARK-32024: update ApplicationStoreInfo.size during initializing", "org.apache.spark.deploy.history.HistoryServerDiskManagerUseRocksDBSuite @ SPARK-38095: appStorePath should use backend extensions", "org.apache.spark.deploy.history.HistoryServerMemoryManagerSuite @ lease and release memory", "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should 
behaves the same as REST API when filtering applications", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-3697: ignore files that cannot be read.", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ history file is renamed from inprogress to completed", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39439: Check final file if in-progress event log file does not exist", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse logs that application is not started", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-5582: empty log directory", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls, including FILE_NAME", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls, excluding FILE_NAME", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls, LOG_FILES not available while FILE_NAME is specified", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls, app not finished, applyIncompleteApplication: true", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls, app not finished, applyIncompleteApplication: false", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log cleaner", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ should not clean inprogress application with lastUpdated time less than maxTime", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log cleaner for inProgress files", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Event log copy", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ driver log cleaner", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-8372: new logs with no app ID are ignored", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ provider correctly checks whether fs is in safe mode", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ provider waits for safe mode to finish before initializing", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ provider reports error after FS leaves safe mode", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ ignore hidden files", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ mismatched version discards old listing", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale 
app information", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-21571: clean up removes invalid history files", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ always find end event for finished apps", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ parse event logs with optimizations off", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-24948: ignore files we don't have read permission on", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ check in-progress event logs absolute length", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log cleaner with the maximum number of log files", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ backwards compatibility with LogInfo from Spark 2.4", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-29755 LogInfo should be serialized/deserialized by jackson properly", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-29755 AttemptInfoWrapper should be serialized/deserialized by jackson properly", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-29043: clean up specified event log", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33215: check ui view permissions without retrieving ui", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory 
usage", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", "org.apache.spark.deploy.history.LevelDBHybridStoreSuite @ test multiple objects write read delete", "org.apache.spark.deploy.history.LevelDBHybridStoreSuite @ test metadata", "org.apache.spark.deploy.history.LevelDBHybridStoreSuite @ test update", "org.apache.spark.deploy.history.LevelDBHybridStoreSuite @ test basic iteration", "org.apache.spark.deploy.history.LevelDBHybridStoreSuite @ test delete after switch", "org.apache.spark.deploy.history.LevelDBHybridStoreSuite @ test klassMap", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-3697: ignore files that cannot be read.", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ history file is renamed from inprogress to completed", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ 
SPARK-39439: Check final file if in-progress event log file does not exist", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse logs that application is not started", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-5582: empty log directory", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls, including FILE_NAME", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls, excluding FILE_NAME", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls, LOG_FILES not available while FILE_NAME is specified", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls, app not finished, applyIncompleteApplication: true", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls, app not finished, applyIncompleteApplication: false", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log cleaner", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ should not clean inprogress application with lastUpdated time less than maxTime", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log cleaner for inProgress files", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Event log copy", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ driver log cleaner", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-8372: new logs with no app ID are ignored", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ provider correctly checks whether fs is in safe mode", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ provider waits for safe mode to finish before initializing", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ provider reports error after FS leaves safe mode", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ ignore hidden files", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ mismatched version discards old listing", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-21571: clean up removes invalid history files", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ always find end event for finished apps", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ parse event logs with optimizations off", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-24948: ignore files we don't have read permission on", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ check in-progress event logs absolute length", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log cleaner with the maximum number of 
log files", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ backwards compatibility with LogInfo from Spark 2.4", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-29755 LogInfo should be serialized/deserialized by jackson properly", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-29755 AttemptInfoWrapper should be serialized/deserialized by jackson properly", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-29043: clean up specified event log", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33215: check ui view permissions without retrieving ui", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous 
process", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download one log for app with multiple attempts", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", "org.apache.spark.deploy.history.RocksDBHybridStoreSuite @ test multiple objects write read delete", "org.apache.spark.deploy.history.RocksDBHybridStoreSuite @ test metadata", "org.apache.spark.deploy.history.RocksDBHybridStoreSuite @ test update", "org.apache.spark.deploy.history.RocksDBHybridStoreSuite @ test basic iteration", "org.apache.spark.deploy.history.RocksDBHybridStoreSuite @ test delete after switch", "org.apache.spark.deploy.history.RocksDBHybridStoreSuite @ test klassMap", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ Retrieve EventLogFileReader correctly", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ get information, list event log files, zip log files - with codec None", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ get information, list event log files, zip log files - with codec Some(lz4)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ get information, list event log files, zip log files - with codec Some(lzf)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ get information, list event log files, zip log files - with codec Some(snappy)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ get information, list event log files, zip log files - with codec Some(zstd)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lzf)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", 
"org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ initialize, write, stop - with codec None", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ initialize, write, stop - with codec Some(lz4)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ initialize, write, stop - with codec Some(lzf)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ initialize, write, stop - with codec Some(snappy)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ initialize, write, stop - with codec Some(zstd)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Event log names", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Log overwriting", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lzf)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ initialize, write, stop - with codec None", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ initialize, write, stop - with codec Some(lz4)", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ initialize, write, stop - with codec Some(lzf)", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ initialize, write, stop - with codec Some(snappy)", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ initialize, write, stop - with codec Some(zstd)", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Log overwriting", "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Event log name", "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ Retrieve EventLogFileReader correctly", "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ get information, list event log files, zip log files - with codec None", "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ get information, list event log files, zip log files - with codec Some(lz4)", "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ get information, list event log files, zip log files - with codec Some(lzf)", "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ get information, list event log files, zip log files - with codec Some(snappy)", "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ get information, list event log files, zip log files - with codec Some(zstd)", 
"org.apache.spark.deploy.JsonProtocolSuite @ writeApplicationInfo", "org.apache.spark.deploy.JsonProtocolSuite @ writeWorkerInfo", "org.apache.spark.deploy.JsonProtocolSuite @ writeApplicationDescription", "org.apache.spark.deploy.JsonProtocolSuite @ writeExecutorRunner", "org.apache.spark.deploy.JsonProtocolSuite @ writeDriverInfo", "org.apache.spark.deploy.JsonProtocolSuite @ writeMasterState", "org.apache.spark.deploy.JsonProtocolSuite @ writeWorkerState", "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor 
failed in Worker", "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", "org.apache.spark.deploy.master.MasterSuite @ resource description with multiple resource profiles", "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", "org.apache.spark.deploy.PythonRunnerSuite @ format path", "org.apache.spark.deploy.PythonRunnerSuite @ format paths", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ construct submit request", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths, bad requests", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client does not send 'SPARK_ENV_LOADED' env var by default", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client does not send 'SPARK_HOME' env var by default", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client does not send 'SPARK_CONF_DIR' env var by default", "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client includes mesos env vars", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ validate", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ request to and from JSON", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ response to and from JSON", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ CreateSubmissionRequest", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ CreateSubmissionResponse", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ KillSubmissionResponse", 
"org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ SubmissionStatusResponse", "org.apache.spark.deploy.rest.SubmitRestProtocolSuite @ ErrorResponse", "org.apache.spark.deploy.RPackageUtilsSuite @ pick which jars to unpack using the manifest", "org.apache.spark.deploy.RPackageUtilsSuite @ build an R package from a jar end to end", "org.apache.spark.deploy.RPackageUtilsSuite @ jars that don't exist are skipped and print warning", "org.apache.spark.deploy.RPackageUtilsSuite @ faulty R package shows documentation", "org.apache.spark.deploy.RPackageUtilsSuite @ jars without manifest return false", "org.apache.spark.deploy.RPackageUtilsSuite @ SparkR zipping works properly", "org.apache.spark.deploy.security.HadoopDelegationTokenManagerSuite @ default configuration", "org.apache.spark.deploy.security.HadoopDelegationTokenManagerSuite @ disable hadoopfs credential provider", "org.apache.spark.deploy.security.HadoopDelegationTokenManagerSuite @ using deprecated configurations", "org.apache.spark.deploy.security.HadoopDelegationTokenManagerSuite @ SPARK-29082: do not fail if current user does not have credentials", "org.apache.spark.deploy.security.HadoopFSDelegationTokenProviderSuite @ hadoopFSsToAccess should return defaultFS even if not configured", "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with propagation and defaults", "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with S3A endpoint set to empty string", "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with S3A options explicitly set", "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with S3A endpoint region set to an empty string", "org.apache.spark.deploy.SparkSubmitSuite @ prints usage on empty input", "org.apache.spark.deploy.SparkSubmitSuite @ prints usage with only --help", "org.apache.spark.deploy.SparkSubmitSuite @ prints error with unrecognized options", "org.apache.spark.deploy.SparkSubmitSuite @ handle binary specified but not class", "org.apache.spark.deploy.SparkSubmitSuite @ handles arguments with --key=val", "org.apache.spark.deploy.SparkSubmitSuite @ handles arguments to user program", "org.apache.spark.deploy.SparkSubmitSuite @ handles arguments to user program with name collision", "org.apache.spark.deploy.SparkSubmitSuite @ print the right queue name", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-24241: do not fail fast if executor num is 0 when dynamic allocation is enabled", "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in cluster mode", "org.apache.spark.deploy.SparkSubmitSuite @ error informatively when mainClass isn't set and S3 JAR 
doesn't exist", "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", "org.apache.spark.deploy.SparkSubmitSuite @ launch simple application with spark-submit", "org.apache.spark.deploy.SparkSubmitSuite @ launch simple application with spark-submit with redaction", "org.apache.spark.deploy.SparkSubmitSuite @ includes jars passed in through --jars", "org.apache.spark.deploy.SparkSubmitSuite @ includes jars passed in through --packages", "org.apache.spark.deploy.SparkSubmitSuite @ includes jars passed through spark.jars.packages and spark.jars.repositories", "org.apache.spark.deploy.SparkSubmitSuite @ correctly builds R packages included in a jar with --packages", "org.apache.spark.deploy.SparkSubmitSuite @ include an external JAR in SparkR", "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly", "org.apache.spark.deploy.SparkSubmitSuite @ ambiguous archive mapping results in error message", "org.apache.spark.deploy.SparkSubmitSuite @ resolves config paths correctly", "org.apache.spark.deploy.SparkSubmitSuite @ user classpath first in driver", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK_CONF_DIR overrides spark-defaults.conf", "org.apache.spark.deploy.SparkSubmitSuite @ support glob path", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-27575: yarn confs should merge new value with existing value", "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile - invalid url", "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile - file doesn't exist", "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile does not download local file", "org.apache.spark.deploy.SparkSubmitSuite @ download one file to local", "org.apache.spark.deploy.SparkSubmitSuite @ download list of files to local", "org.apache.spark.deploy.SparkSubmitSuite @ remove copies of application jar from classpath", "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced schemes", "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-32119: Jars and files should be loaded when Executors launch for plugins", "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties", "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", "org.apache.spark.deploy.SparkSubmitSuite @ handles natural line delimiters in --properties-file and --conf uniformly", "org.apache.spark.deploy.SparkSubmitSuite @ get a Spark configuration from arguments", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ incorrect maven coordinate throws error", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ create repo resolvers", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ create additional resolvers", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ add dependencies works correctly", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ excludes works correctly", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ ivy path works correctly", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ 
search for artifact at local repositories", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ dependency not found throws RuntimeException", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ neglects Spark and Spark's dependencies", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ exclude dependencies end to end", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ load ivy settings file", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ SPARK-10878: test resolution files cleaned after resolving artifact", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ SPARK-34624: should ignore non-jar dependencies", "org.apache.spark.deploy.SparkSubmitUtilsSuite @ SPARK-39501: Resolve maven dependenicy in IPv6", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", "org.apache.spark.deploy.worker.CommandUtilsSuite @ set libraryPath correctly", "org.apache.spark.deploy.worker.CommandUtilsSuite @ auth secret shouldn't appear in java opts", "org.apache.spark.deploy.worker.DriverRunnerTest @ Process succeeds instantly", "org.apache.spark.deploy.worker.DriverRunnerTest @ Process failing several times and then succeeding", "org.apache.spark.deploy.worker.DriverRunnerTest @ Process doesn't restart if not supervised", "org.apache.spark.deploy.worker.DriverRunnerTest @ Process doesn't restart if killed", "org.apache.spark.deploy.worker.DriverRunnerTest @ Reset of backoff counter", "org.apache.spark.deploy.worker.DriverRunnerTest @ Kill process finalized with state KILLED", "org.apache.spark.deploy.worker.DriverRunnerTest @ Finalized with state FINISHED", "org.apache.spark.deploy.worker.DriverRunnerTest @ Finalized with state FAILED", "org.apache.spark.deploy.worker.DriverRunnerTest @ Handle exception starting process", "org.apache.spark.deploy.worker.ExecutorRunnerTest @ command includes appId", "org.apache.spark.deploy.worker.ui.LogPageSuite @ get logs simple", "org.apache.spark.deploy.worker.WorkerArgumentsTest @ Memory can't be set to 0 when cmd line args leave off M or G", "org.apache.spark.deploy.worker.WorkerArgumentsTest @ Memory can't be set to 0 when SPARK_WORKER_MEMORY env property leaves off M or G", "org.apache.spark.deploy.worker.WorkerArgumentsTest @ Memory correctly set when SPARK_WORKER_MEMORY env property appends G", "org.apache.spark.deploy.worker.WorkerArgumentsTest @ Memory correctly set from args with M appended to memory value", "org.apache.spark.deploy.worker.WorkerSuite @ test 
isUseLocalNodeSSLConfig", "org.apache.spark.deploy.worker.WorkerSuite @ test maybeUpdateSSLSettings", "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans app dirs and shuffle metadata when spark.shuffle.service.db.enabled=true, spark.shuffle.service.db.backend=RocksDB", "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans app dirs and shuffle metadata when spark.shuffle.service.db.enabled=true, spark.shuffle.service.db.backend=LevelDB", "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", "org.apache.spark.DistributedSuite @ task throws not serializable exception", "org.apache.spark.DistributedSuite @ local-cluster format", "org.apache.spark.DistributedSuite @ simple groupByKey", "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", "org.apache.spark.DistributedSuite @ accumulators", "org.apache.spark.DistributedSuite @ broadcast variables", "org.apache.spark.DistributedSuite @ repeatedly failing task", "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", "org.apache.spark.DistributedSuite @ caching (encryption = off)", "org.apache.spark.DistributedSuite @ caching (encryption = on)", "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", "org.apache.spark.DistributedSuite @ caching in memory, replicated (encryption = off)", "org.apache.spark.DistributedSuite @ caching in memory, replicated (encryption = off) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory, replicated (encryption = on)", "org.apache.spark.DistributedSuite @ caching in memory, replicated (encryption = on) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory, serialized, replicated (encryption = off)", "org.apache.spark.DistributedSuite @ caching in memory, serialized, replicated (encryption = off) (with 
replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory, serialized, replicated (encryption = on)", "org.apache.spark.DistributedSuite @ caching in memory, serialized, replicated (encryption = on) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 2 (encryption = off)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 2 (encryption = off) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 2 (encryption = on)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 2 (encryption = on) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 3 (encryption = off)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 3 (encryption = off) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 3 (encryption = on)", "org.apache.spark.DistributedSuite @ caching on disk, replicated 3 (encryption = on) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory and disk, replicated (encryption = off)", "org.apache.spark.DistributedSuite @ caching in memory and disk, replicated (encryption = off) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory and disk, replicated (encryption = on)", "org.apache.spark.DistributedSuite @ caching in memory and disk, replicated (encryption = on) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory and disk, serialized, replicated (encryption = off)", "org.apache.spark.DistributedSuite @ caching in memory and disk, serialized, replicated (encryption = off) (with replication as stream)", "org.apache.spark.DistributedSuite @ caching in memory and disk, serialized, replicated (encryption = on)", "org.apache.spark.DistributedSuite @ caching in memory and disk, serialized, replicated (encryption = on) (with replication as stream)", "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", "org.apache.spark.DistributedSuite @ passing environment variables to cluster", "org.apache.spark.DistributedSuite @ recover from node failures", "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", "org.apache.spark.DistributedSuite @ recover from node failures with replication", "org.apache.spark.DistributedSuite @ unpersist RDDs", "org.apache.spark.DistributedSuite @ reference partitions inside a task", "org.apache.spark.DriverSuite @ driver should exit after finishing without cleanup (SPARK-530)", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources resource profile", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required resource profile", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than 
required", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option with resource profile", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ SPARK-24203 when bindAddress is not set, it defaults to hostname", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ SPARK-24203 when bindAddress is different, it does not default to hostname", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ Tasks launched should always be cancelled.", "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ Tasks not launched should always be cancelled.", "org.apache.spark.executor.ExecutorMetricsPollerSuite @ SPARK-34779: stage entry shouldn't be removed before a heartbeat occurs", "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", "org.apache.spark.executor.ExecutorSuite @ SPARK-33587: isFatalError", "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", "org.apache.spark.executor.ProcfsMetricsGetterSuite @ testGetProcessInfo", "org.apache.spark.executor.ProcfsMetricsGetterSuite @ SPARK-34845: partial metrics shouldn't be returned", "org.apache.spark.executor.TaskMetricsSuite @ mutating values", "org.apache.spark.executor.TaskMetricsSuite @ mutating shuffle read metrics values", "org.apache.spark.executor.TaskMetricsSuite @ mutating shuffle write metrics values", "org.apache.spark.executor.TaskMetricsSuite @ mutating input metrics values", "org.apache.spark.executor.TaskMetricsSuite @ mutating output metrics values", "org.apache.spark.executor.TaskMetricsSuite @ merging multiple shuffle read metrics", "org.apache.spark.executor.TaskMetricsSuite @ additional accumulables", "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default 
profile", "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", "org.apache.spark.ExternalShuffleServiceSuite @ zero 
sized blocks", "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", "org.apache.spark.FailureSuite @ failure in a single-stage job", "org.apache.spark.FailureSuite @ failure in a two-stage job", "org.apache.spark.FailureSuite @ failure in a map stage", "org.apache.spark.FailureSuite @ failure because task results are not serializable", "org.apache.spark.FailureSuite @ failure because task closure is not serializable", "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", "org.apache.spark.FileSuite @ text files", "org.apache.spark.FileSuite @ text files (compressed)", "org.apache.spark.FileSuite @ text files do not allow null rows", "org.apache.spark.FileSuite @ SequenceFiles", "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", "org.apache.spark.FileSuite @ SequenceFile with writable key", "org.apache.spark.FileSuite @ SequenceFile with writable value", 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", "org.apache.spark.FileSuite @ object files of ints", "org.apache.spark.FileSuite @ object files of complex types", "org.apache.spark.FileSuite @ object files of classes from a JAR", "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", "org.apache.spark.FileSuite @ binary file input as byte array", "org.apache.spark.FileSuite @ portabledatastream caching tests", "org.apache.spark.FileSuite @ portabledatastream persist disk storage", "org.apache.spark.FileSuite @ portabledatastream flatmap tests", "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", "org.apache.spark.FileSuite @ fixed record length binary file as byte array", "org.apache.spark.FileSuite @ negative binary record length should raise an exception", "org.apache.spark.FileSuite @ file caching", "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", "org.apache.spark.FileSuite @ Get input files via old Hadoop API", "org.apache.spark.FileSuite @ Get input files via new Hadoop API", "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", "org.apache.spark.FutureActionSuite @ simple async action", "org.apache.spark.FutureActionSuite @ complex async action", "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", "org.apache.spark.input.WholeTextFileInputFormatSuite @ for small files minimum split 
size per node and per rack should be less than or equal to maximum split size.", "org.apache.spark.input.WholeTextFileRecordReaderSuite @ Correctness of WholeTextFileRecordReader.", "org.apache.spark.input.WholeTextFileRecordReaderSuite @ Correctness of WholeTextFileRecordReader with GzipCodec.", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: int", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: long", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: double", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: boolean", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: optional", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: fallback", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: time", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: bytes", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: regex", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: string seq", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: int seq", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: transformation", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: checkValue()", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: valid values check", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: conversion error", "org.apache.spark.internal.config.ConfigEntrySuite @ default value handling is null-safe", "org.apache.spark.internal.config.ConfigEntrySuite @ variable expansion of spark config entries", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry : default function", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: alternative keys", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: prepend with default separator", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: prepend with custom separator", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: prepend with fallback", "org.apache.spark.internal.config.ConfigEntrySuite @ conf entry: prepend should work only with string type", "org.apache.spark.internal.config.ConfigEntrySuite @ onCreate", "org.apache.spark.internal.config.ConfigReaderSuite @ variable expansion", "org.apache.spark.internal.config.ConfigReaderSuite @ circular references", "org.apache.spark.internal.config.ConfigReaderSuite @ spark conf provider filters config keys", "org.apache.spark.internal.io.FileCommitProtocolInstantiationSuite @ Dynamic partitions require appropriate constructor", "org.apache.spark.internal.io.FileCommitProtocolInstantiationSuite @ Standard partitions work with classic constructor", "org.apache.spark.internal.io.FileCommitProtocolInstantiationSuite @ Three arg constructors have priority", "org.apache.spark.internal.io.FileCommitProtocolInstantiationSuite @ Three arg constructors have priority when dynamic", "org.apache.spark.internal.io.FileCommitProtocolInstantiationSuite @ The protocol must be of the correct class", "org.apache.spark.internal.io.FileCommitProtocolInstantiationSuite @ If there is no matching constructor, class hierarchy is irrelevant", "org.apache.spark.internal.io.SparkHadoopWriterUtilsSuite @ JobID Generation", "org.apache.spark.internal.io.SparkHadoopWriterUtilsSuite @ JobIDs generated at same time are different", "org.apache.spark.internal.io.SparkHadoopWriterUtilsSuite @ JobIDs with negative job number", 
"org.apache.spark.internal.io.SparkHadoopWriterUtilsSuite @ JobIDs on Epoch are different", "org.apache.spark.internal.LoggingSuite @ spark-shell logging filter", "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", "org.apache.spark.internal.plugin.PluginContainerSuite @ do nothing if plugins are not configured", "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in TaskContext", "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", "org.apache.spark.io.ChunkedByteBufferFileRegionSuite @ transferTo can stop and resume correctly", "org.apache.spark.io.ChunkedByteBufferFileRegionSuite @ transfer to with random limits", "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", "org.apache.spark.io.CompressionCodecSuite @ default compression codec", "org.apache.spark.io.CompressionCodecSuite @ lz4 compression codec", "org.apache.spark.io.CompressionCodecSuite @ lz4 compression codec short form", "org.apache.spark.io.CompressionCodecSuite @ lz4 supports concatenation of serialized streams", "org.apache.spark.io.CompressionCodecSuite @ lzf compression codec", "org.apache.spark.io.CompressionCodecSuite @ lzf compression codec short form", "org.apache.spark.io.CompressionCodecSuite @ lzf supports concatenation of serialized streams", "org.apache.spark.io.CompressionCodecSuite @ snappy compression codec", "org.apache.spark.io.CompressionCodecSuite @ snappy compression codec short form", "org.apache.spark.io.CompressionCodecSuite @ snappy supports concatenation of serialized streams", "org.apache.spark.io.CompressionCodecSuite @ zstd compression codec", "org.apache.spark.io.CompressionCodecSuite @ zstd compression codec short form", "org.apache.spark.io.CompressionCodecSuite @ zstd supports concatenation of serialized zstd", "org.apache.spark.io.CompressionCodecSuite @ bad compression codec", "org.apache.spark.io.NioBufferedInputStreamSuite @ testReadMultipleBytes", "org.apache.spark.io.NioBufferedInputStreamSuite @ testSkipFromFileChannel", "org.apache.spark.io.NioBufferedInputStreamSuite @ 
testNegativeBytesSkippedAfterRead", "org.apache.spark.io.NioBufferedInputStreamSuite @ testBytesSkippedAfterEOF", "org.apache.spark.io.NioBufferedInputStreamSuite @ testBytesSkippedAfterRead", "org.apache.spark.io.NioBufferedInputStreamSuite @ testBytesSkipped", "org.apache.spark.io.NioBufferedInputStreamSuite @ testReadOneByte", "org.apache.spark.io.NioBufferedInputStreamSuite @ testReadPastEOF", "org.apache.spark.JobCancellationSuite @ local mode, FIFO scheduler", "org.apache.spark.JobCancellationSuite @ local mode, fair scheduler", "org.apache.spark.JobCancellationSuite @ cluster mode, FIFO scheduler", "org.apache.spark.JobCancellationSuite @ cluster mode, fair scheduler", "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", "org.apache.spark.JobCancellationSuite @ job group", "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", "org.apache.spark.JobCancellationSuite @ job group with interruption", "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", "org.apache.spark.launcher.LauncherBackendSuite @ local: launcher handle", "org.apache.spark.launcher.LauncherBackendSuite @ standalone/client: launcher handle", "org.apache.spark.MapOutputTrackerSuite @ master start and stop", "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", "org.apache.spark.MapOutputTrackerSuite @ remote fetch", "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", "org.apache.spark.MapOutputTrackerSuite @ min broadcast size exceeds max RPC message size", "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", "org.apache.spark.MapOutputTrackerSuite @ equally divide map statistics tasks", "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: getPreferredLocationsForShuffle with MergeStatus", "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: unregister merge result if it is present and contains the map Id", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize and deserialize over 2GB compressed 
mapStatuses", "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", "org.apache.spark.MapOutputTrackerSuite @ SPARK-37023: Avoid fetching merge status when useMergeResult is false", "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", "org.apache.spark.memory.TestMemoryManagerSuite @ tracks allocated execution memory by task", "org.apache.spark.memory.TestMemoryManagerSuite @ markconsequentOOM", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ single task requesting on-heap execution memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks requesting full on-heap execution memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks cannot grow past 1 / N of on-heap execution memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks can block to get at least 1 / 2N of on-heap execution memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-35486: memory freed by self-spilling is taken by another task", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ TaskMemoryManager.cleanUpAllAllocatedMemory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks should not be granted a negative amount of execution memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ off-heap execution allocations cannot exceed limit", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic execution memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic storage memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution evicts storage", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution memory requests smaller than free memory should evict storage (SPARK-12165)", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ storage does not evict execution", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ small heap", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ insufficient executor memory", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution can evict cached blocks when there are multiple active tasks (SPARK-12155)", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-15260: atomically resize memory pools", "org.apache.spark.memory.UnifiedMemoryManagerSuite @ not enough free memory in the storage pool --OFF_HEAP", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with 
interleaved reads", "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", "org.apache.spark.metrics.MetricsConfigSuite @ MetricsConfig with default properties", "org.apache.spark.metrics.MetricsConfigSuite @ MetricsConfig with properties set from a file", "org.apache.spark.metrics.MetricsConfigSuite @ MetricsConfig with properties set from a Spark configuration", "org.apache.spark.metrics.MetricsConfigSuite @ MetricsConfig with properties set from a file and a Spark configuration", "org.apache.spark.metrics.MetricsConfigSuite @ MetricsConfig with subProperties", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with default config", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with sources add", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Driver instance", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Driver instance and spark.app.id is not set", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Driver instance and spark.executor.id is not set", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance and spark.app.id is not set", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance and spark.executor.id is not set", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with instance which is neither Driver nor Executor", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance, with custom namespace", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance, custom namespace which is not set", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance, custom namespace, spark.executor.id not set", "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with non-driver, non-executor instance with custom namespace", "org.apache.spark.metrics.MetricsSystemSuite @ SPARK-37078: Support old 3-parameter Sink constructors", "org.apache.spark.metrics.sink.GraphiteSinkSuite @ GraphiteSink with default MetricsFilter", "org.apache.spark.metrics.sink.GraphiteSinkSuite @ GraphiteSink with regex MetricsFilter", "org.apache.spark.metrics.sink.GraphiteSinkSuite @ GraphiteSink without host", "org.apache.spark.metrics.sink.GraphiteSinkSuite @ GraphiteSink without port", "org.apache.spark.metrics.sink.GraphiteSinkSuite @ GraphiteSink with invalid protocol", "org.apache.spark.metrics.sink.PrometheusServletSuite @ register metrics", "org.apache.spark.metrics.sink.PrometheusServletSuite @ normalize key", "org.apache.spark.metrics.sink.StatsdSinkSuite @ metrics StatsD sink with Counter", "org.apache.spark.metrics.sink.StatsdSinkSuite @ metrics StatsD sink with Gauge", "org.apache.spark.metrics.sink.StatsdSinkSuite @ metrics StatsD sink with Histogram", 
"org.apache.spark.metrics.sink.StatsdSinkSuite @ metrics StatsD sink with Timer", "org.apache.spark.metrics.source.AccumulatorSourceSuite @ that that accumulators register against the metric system's register", "org.apache.spark.metrics.source.AccumulatorSourceSuite @ the accumulators value property is checked when the gauge's value is requested", "org.apache.spark.metrics.source.AccumulatorSourceSuite @ the double accumulators value property is checked when the gauge's value is requested", "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", "org.apache.spark.network.BlockTransferServiceSuite @ fetchBlockSync should not hang when BlockFetchingListener.onBlockFetchSuccess fails", "org.apache.spark.network.netty.NettyBlockRpcServerSuite @ SPARK-38830: Rethrow IllegalArgumentException due to `Unknown message type`", "org.apache.spark.network.netty.NettyBlockRpcServerSuite @ SPARK-38830: Warn and ignore NegativeArraySizeException due to the corruption", "org.apache.spark.network.netty.NettyBlockRpcServerSuite @ SPARK-38830: Warn and ignore IndexOutOfBoundsException due to the corruption", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", "org.apache.spark.network.netty.SparkTransportConfSuite @ default value is get when neither role nor module is set", "org.apache.spark.network.netty.SparkTransportConfSuite @ module value is get when role is not set", "org.apache.spark.network.netty.SparkTransportConfSuite @ use correct configuration when both module and role configs are present", "org.apache.spark.partial.CountEvaluatorSuite @ test count 0", "org.apache.spark.partial.CountEvaluatorSuite @ test count >= 1", "org.apache.spark.partial.MeanEvaluatorSuite @ test count 0", "org.apache.spark.partial.MeanEvaluatorSuite @ test count 1", "org.apache.spark.partial.MeanEvaluatorSuite @ test count > 1", "org.apache.spark.partial.SumEvaluatorSuite @ correct handling of count 1", "org.apache.spark.partial.SumEvaluatorSuite @ correct 
handling of count 0", "org.apache.spark.partial.SumEvaluatorSuite @ correct handling of NaN", "org.apache.spark.partial.SumEvaluatorSuite @ correct handling of > 1 values", "org.apache.spark.partial.SumEvaluatorSuite @ test count > 1", "org.apache.spark.PartitioningSuite @ HashPartitioner equality", "org.apache.spark.PartitioningSuite @ RangePartitioner equality", "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", "org.apache.spark.PartitioningSuite @ partitioner preservation", "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", "org.apache.spark.PartitioningSuite @ defaultPartitioner", "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", "org.apache.spark.rdd.AsyncRDDActionsSuite @ FutureAction result, infinite wait", "org.apache.spark.rdd.AsyncRDDActionsSuite @ FutureAction result, finite wait", "org.apache.spark.rdd.AsyncRDDActionsSuite @ FutureAction result, timeout", "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", "org.apache.spark.rdd.DoubleRDDSuite @ sum", "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", "org.apache.spark.rdd.HadoopRDDSuite @ SPARK-38922: HadoopRDD convertSplitLocationInfo contains Some(null) cause NPE", "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ one element per slice", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ one slice", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ equal slices", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ 
non-equal slices", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ splitting exclusive range", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ splitting inclusive range", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ empty data", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ zero slices", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ negative number of slices", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ exclusive ranges sliced into ranges", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ inclusive ranges sliced into ranges", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ identical slice sizes between Range and NumericRange", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ identical slice sizes between List and NumericRange", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ large ranges don't overflow", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ random array tests", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ random exclusive range tests", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ random inclusive range tests", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ exclusive ranges of longs", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ inclusive ranges of longs", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ exclusive ranges of doubles", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ inclusive ranges of doubles", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ inclusive ranges with Int.MaxValue and Int.MinValue", "org.apache.spark.rdd.ParallelCollectionSplitSuite @ empty ranges with Int.MaxValue and Int.MinValue", "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", "org.apache.spark.rdd.RDDSuite @ basic operations", "org.apache.spark.rdd.RDDSuite @ serialization", "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", "org.apache.spark.rdd.RDDSuite @ SparkContext.union", "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", "org.apache.spark.rdd.RDDSuite @ partitioner aware union", "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", "org.apache.spark.rdd.RDDSuite @ fold", "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", "org.apache.spark.rdd.RDDSuite @ aggregate", "org.apache.spark.rdd.RDDSuite @ treeAggregate", "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", "org.apache.spark.rdd.RDDSuite @ treeReduce", "org.apache.spark.rdd.RDDSuite @ basic caching", "org.apache.spark.rdd.RDDSuite @ caching with failures", "org.apache.spark.rdd.RDDSuite @ empty RDD", "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality, large scale (10K partitions)", "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality, large scale (10K partitions)", "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality, fail first pass", "org.apache.spark.rdd.RDDSuite @ zipped RDDs", "org.apache.spark.rdd.RDDSuite @ partition pruning", "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", "org.apache.spark.rdd.RDDSuite @ take", "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", "org.apache.spark.rdd.RDDSuite @ top with custom ordering", "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", "org.apache.spark.rdd.RDDSuite @ isEmpty", "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", "org.apache.spark.rdd.RDDSuite @ takeSample", "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", "org.apache.spark.rdd.RDDSuite @ randomSplit", "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", "org.apache.spark.rdd.RDDSuite @ sortByKey", "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", "org.apache.spark.rdd.RDDSuite @ intersection", "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", "org.apache.spark.rdd.RDDSuite @ zipWithIndex", "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", "org.apache.spark.rdd.RDDSuite @ parent method", "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", "org.apache.spark.rdd.SortingSuite @ sortByKey", "org.apache.spark.rdd.SortingSuite @ large array", "org.apache.spark.rdd.SortingSuite @ large array with one split", "org.apache.spark.rdd.SortingSuite @ large array with many partitions", "org.apache.spark.rdd.SortingSuite @ sort descending", "org.apache.spark.rdd.SortingSuite @ sort descending with one split", "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", "org.apache.spark.rdd.SortingSuite @ more partitions than elements", "org.apache.spark.rdd.SortingSuite @ empty RDD", "org.apache.spark.rdd.SortingSuite @ partition balancing", "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", 
"org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", "org.apache.spark.resource.ResourceInformationSuite @ ResourceInformation.parseJson for valid JSON", "org.apache.spark.resource.ResourceInformationSuite @ ResourceInformation.equals/hashCode", "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor cpus", "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no executor cores", "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting executor cores", "org.apache.spark.resource.ResourceProfileSuite @ Create ResourceProfile", "org.apache.spark.resource.ResourceProfileSuite @ test ResourceProfiles equal", "org.apache.spark.resource.ResourceProfileSuite @ Test ExecutorResourceRequests memory helpers", "org.apache.spark.resource.ResourceProfileSuite @ Test TaskResourceRequest fractional", "org.apache.spark.resource.ResourceProfileSuite @ ResourceProfile has correct custom executor resources", "org.apache.spark.resource.ResourceProfileSuite @ ResourceProfile has correct custom task resources", "org.apache.spark.resource.ResourceUtilsSuite @ ResourceID", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer no addresses errors", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer amount 0", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer multiple resource types", "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover the remaining", "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", "org.apache.spark.resource.ResourceUtilsSuite @ list resource ids", "org.apache.spark.resource.ResourceUtilsSuite @ parse resource request", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer multiple gpus on driver", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer script returns mismatched name", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer with invalid class", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer script returns invalid format", "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer script doesn't exist", 
"org.apache.spark.resource.ResourceUtilsSuite @ gpu's specified but not a discovery script", "org.apache.spark.rpc.netty.InboxSuite @ post", "org.apache.spark.rpc.netty.InboxSuite @ post: with reply", "org.apache.spark.rpc.netty.InboxSuite @ post: multiple threads", "org.apache.spark.rpc.netty.InboxSuite @ post: Associated", "org.apache.spark.rpc.netty.InboxSuite @ post: Disassociated", "org.apache.spark.rpc.netty.InboxSuite @ post: AssociationError", "org.apache.spark.rpc.netty.InboxSuite @ SPARK-32738: should reduce the number of active threads when fatal error happens", "org.apache.spark.rpc.netty.NettyRpcAddressSuite @ toString", "org.apache.spark.rpc.netty.NettyRpcAddressSuite @ toString for client mode", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", "org.apache.spark.rpc.netty.NettyRpcHandlerSuite @ receive", "org.apache.spark.rpc.netty.NettyRpcHandlerSuite @ connectionTerminated", "org.apache.spark.rpc.RpcAddressSuite @ hostPort", "org.apache.spark.rpc.RpcAddressSuite @ fromSparkURL", "org.apache.spark.rpc.RpcAddressSuite @ fromSparkURL: a typo url", "org.apache.spark.rpc.RpcAddressSuite @ fromSparkURL: invalid scheme", "org.apache.spark.rpc.RpcAddressSuite @ toSparkURL", "org.apache.spark.rpc.RpcAddressSuite @ SPARK-39468: IPv6 hostPort", "org.apache.spark.rpc.RpcAddressSuite @ SPARK-39468: IPv6 fromSparkURL", "org.apache.spark.rpc.RpcAddressSuite @ SPARK-39468: IPv6 toSparkURL", "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", "org.apache.spark.scheduler.BarrierTaskContextSuite @ barrier task killed, no interrupt", "org.apache.spark.scheduler.BarrierTaskContextSuite @ barrier task killed, interrupt", "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ exec alloc decrease.", "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", "org.apache.spark.scheduler.DAGSchedulerSuite @ equals and hashCode AccumulableInfo", "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", "org.apache.spark.scheduler.DAGSchedulerSuite @ Single stage fetch failure should not abort the stage.", "org.apache.spark.scheduler.DAGSchedulerSuite @ Multiple consecutive stage fetch failures should lead to job being aborted.", "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt 
of a barrier stage caused by FetchFailure", "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", "org.apache.spark.scheduler.DAGSchedulerSuite @ stage used by two jobs, the first no longer active (SPARK-6880)", "org.apache.spark.scheduler.DAGSchedulerSuite @ stage used by two jobs, some fetch failures, and the first job no longer active (SPARK-6880)", "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-30388: shuffle fetch failed on speculative task, but original task succeed", "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-17644: After one stage is aborted for too many failed attempts, subsequent stagesstill behave correctly on fetch failures", "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-19263] DAGScheduler should not submit multiple active tasksets, even with late completions from earlier stage attempts", "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", "org.apache.spark.scheduler.DAGSchedulerSuite @ test 1 resource profile", "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", "org.apache.spark.scheduler.DAGSchedulerSuite @ test merge 2 resource profiles multiple configs", "org.apache.spark.scheduler.DAGSchedulerSuite @ test merge 3 resource profiles", "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", 
"org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ basic executor timeout", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-4951, SPARK-26927: handle out of order task start events", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track tasks running on executor", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ use appropriate time out depending on whether blocks are stored", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ keeps track of stored blocks for each rdd and split", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ handle timeouts correctly with multiple executors", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-38019: timedOutExecutors should be deterministic", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-27677: don't track blocks stored on disk when using shuffle service", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track executors pending for removal", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28839: Avoids NPE in context cleaner when shuffle service is on", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout calculation", "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", "org.apache.spark.scheduler.EventLoggingListenerSuite @ Event logging with password redaction", "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", "org.apache.spark.scheduler.ExecutorResourceInfoSuite @ Track Executor Resource information", "org.apache.spark.scheduler.ExecutorResourceInfoSuite @ Don't allow acquire address that is not available", "org.apache.spark.scheduler.ExecutorResourceInfoSuite @ Don't allow acquire address that doesn't exist", "org.apache.spark.scheduler.ExecutorResourceInfoSuite @ Don't allow release address that is not assigned", "org.apache.spark.scheduler.ExecutorResourceInfoSuite @ Don't allow release address that doesn't exist", "org.apache.spark.scheduler.ExecutorResourceInfoSuite @ Ensure that we can acquire the same fractions of a resource from an executor", "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ If preferred node is bad, without excludeOnFailure job will fail", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ With default settings, job can succeed despite multiple bad executors on node", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ Bad node with multiple executors, job will still succeed with the right confs", "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than 
maxTaskFailures", "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", "org.apache.spark.scheduler.HealthTrackerSuite @ excluding kills executors, configured by EXCLUDE_ON_FAILURE_KILL_ENABLED", "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", "org.apache.spark.scheduler.HealthTrackerSuite @ fetch failure excluding kills executors, configured by EXCLUDE_ON_FAILURE_KILL_ENABLED", "org.apache.spark.scheduler.JobWaiterSuite @ call jobFailed multiple times", "org.apache.spark.scheduler.MapStatusSuite @ compressSize", "org.apache.spark.scheduler.MapStatusSuite @ decompressSize", "org.apache.spark.scheduler.MapStatusSuite @ MapStatus should never report non-empty blocks' sizes as 0", "org.apache.spark.scheduler.MapStatusSuite @ large tasks should use org.apache.spark.scheduler.HighlyCompressedMapStatus", "org.apache.spark.scheduler.MapStatusSuite @ HighlyCompressedMapStatus: estimated size should be the average non-empty block size", "org.apache.spark.scheduler.MapStatusSuite @ SPARK-22540: ensure HighlyCompressedMapStatus calculates correct avgSize", "org.apache.spark.scheduler.MapStatusSuite @ RoaringBitmap: runOptimize succeeded", "org.apache.spark.scheduler.MapStatusSuite @ RoaringBitmap: runOptimize failed", "org.apache.spark.scheduler.MapStatusSuite @ Blocks which are bigger than SHUFFLE_ACCURATE_BLOCK_THRESHOLD should not be underestimated.", "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", "org.apache.spark.scheduler.MapStatusSuite @ SPARK-36967: HighlyCompressedMapStatus should record accurately the size of skewed shuffle blocks", "org.apache.spark.scheduler.MapStatusSuite @ SPARK-36967: Limit accurate skewed block number if too many blocks are skewed", "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ If commit fails, if task is retried it should not be locked, and will succeed.", "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", "org.apache.spark.scheduler.PoolSuite @ Pool should throw IllegalArgumentException when schedulingMode is not supported", "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", "org.apache.spark.scheduler.ReplayListenerSuite @ Simple replay", "org.apache.spark.scheduler.ReplayListenerSuite @ Replay compressed inprogress log file succeeding on partial read", "org.apache.spark.scheduler.ReplayListenerSuite @ Replay incompatible event log", "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", "org.apache.spark.scheduler.SparkListenerSuite @ basic creation and shutdown of LiveListenerBus", "org.apache.spark.scheduler.SparkListenerSuite @ bus.stop() waits for the event queue to completely drain", "org.apache.spark.scheduler.SparkListenerSuite @ metrics for dropped listener events", "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", "org.apache.spark.scheduler.SparkListenerSuite @ onTaskEnd() should be called for all started tasks, even after job has been killed", "org.apache.spark.scheduler.SparkListenerSuite @ SparkListener moves on if a listener throws an exception", "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", "org.apache.spark.scheduler.SparkListenerSuite @ add and remove listeners to/from LiveListenerBus queues", 
"org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: throw interrupt", "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: set Thread interrupted", "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: throw interrupt", "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: set Thread interrupted", "org.apache.spark.scheduler.SparkListenerSuite @ event queue size can be configured through spark conf", "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", "org.apache.spark.scheduler.TaskContextSuite @ all TaskCompletionListeners should be called even if some fail", "org.apache.spark.scheduler.TaskContextSuite @ all TaskFailureListeners should be called even if some fail", "org.apache.spark.scheduler.TaskContextSuite @ FailureListener throws after task body fails", "org.apache.spark.scheduler.TaskContextSuite @ CompletionListener throws after task body fails", "org.apache.spark.scheduler.TaskContextSuite @ CompletionListener throws after task body succeeds", "org.apache.spark.scheduler.TaskContextSuite @ FailureListener throws after task body succeeds and CompletionListener fails", "org.apache.spark.scheduler.TaskContextSuite @ CompletionListener throws after task body succeeds and CompletionListener fails", "org.apache.spark.scheduler.TaskContextSuite @ CompletionListener throws after task body fails and FailureListener fails", "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.attemptNumber should return attempt number, not task id (SPARK-4014)", "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", "org.apache.spark.scheduler.TaskContextSuite @ immediately call a completion listener if the context is completed", "org.apache.spark.scheduler.TaskContextSuite @ immediately call a failure listener if the context has failed", "org.apache.spark.scheduler.TaskContextSuite @ TaskCompletionListenerException.getMessage should include previousError", "org.apache.spark.scheduler.TaskContextSuite @ all TaskCompletionListeners should be called even if some fail or a task", "org.apache.spark.scheduler.TaskContextSuite @ listener registers another listener (reentrancy)", "org.apache.spark.scheduler.TaskContextSuite @ listener registers another listener using a second thread", "org.apache.spark.scheduler.TaskContextSuite @ listeners registered from different threads are called sequentially", 
"org.apache.spark.scheduler.TaskContextSuite @ listeners registered from same thread are called in reverse order", "org.apache.spark.scheduler.TaskDescriptionSuite @ encoding and then decoding a TaskDescription results in the same TaskDescription", "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", "org.apache.spark.scheduler.TaskResultGetterSuite @ task result size is set on the driver, not the executors", "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a zombie attempt finishes, continue scheduling tasks for non-zombie attempts", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't abort if there is an executor available, though it hasn't had scheduled tasks yet", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler checks for executors that can be expired from excludeOnFailure", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ With delay scheduling off, tasks can be run at any locality level immediately", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile, but not enough gpus", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", "org.apache.spark.scheduler.TaskSetExcludelistSuite @ Excluding tasks, executors, and nodes", "org.apache.spark.scheduler.TaskSetExcludelistSuite @ multiple attempts for the same task count once", "org.apache.spark.scheduler.TaskSetExcludelistSuite @ only exclude nodes for the task set when all the excluded executors are all on same host", "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", "org.apache.spark.scheduler.TaskSetManagerSuite @ executors should be excluded after task failure, in spite of locality preferences", "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", 
"org.apache.spark.scheduler.TaskSetManagerSuite @ Test that locations with HDFSCacheTaskLocation are treated as PROCESS_LOCAL.", "org.apache.spark.scheduler.TaskSetManagerSuite @ Test TaskLocation for different host type.", "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", "org.apache.spark.scheduler.TaskSetManagerSuite @ don't update excludelist for shuffle-fetch failures, preemption, denied commits, or killed tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 when a speculation time threshold is provided, should speculative run the task even if there are not enough successful runs, total tasks: 1", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976: when the speculation time threshold is not provided,don't speculative run if there are not enough successful runs, total tasks: 1", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 when a speculation time threshold is provided, should speculative run the task even if there are not enough successful runs, total tasks: 2", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976: when the speculation time threshold is not provided,don't speculative run if there are not enough successful runs, total tasks: 2", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 when a speculation time threshold is provided, should not speculative if there are too many tasks in the stage even though time threshold is provided", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30417 when spark.task.cpus is greater than 
spark.executor.cores due to standalone settings, speculate if there is only one task in the stage", "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", "org.apache.spark.security.CryptoStreamUtilsSuite @ crypto configuration conversion", "org.apache.spark.security.CryptoStreamUtilsSuite @ shuffle encryption key length should be 128 by default", "org.apache.spark.security.CryptoStreamUtilsSuite @ create 256-bit key", "org.apache.spark.security.CryptoStreamUtilsSuite @ create key with invalid length", "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", "org.apache.spark.security.CryptoStreamUtilsSuite @ crypto stream wrappers", "org.apache.spark.security.CryptoStreamUtilsSuite @ error handling wrapper", "org.apache.spark.security.SocketAuthHelperSuite @ successful auth", "org.apache.spark.security.SocketAuthHelperSuite @ failed auth", "org.apache.spark.SecurityManagerSuite @ set security with conf", "org.apache.spark.SecurityManagerSuite @ set security with conf for groups", "org.apache.spark.SecurityManagerSuite @ set security with api", "org.apache.spark.SecurityManagerSuite @ set security with api for groups", "org.apache.spark.SecurityManagerSuite @ set security modify acls", "org.apache.spark.SecurityManagerSuite @ set security modify acls for groups", "org.apache.spark.SecurityManagerSuite @ set security admin acls", "org.apache.spark.SecurityManagerSuite @ set security admin acls for groups", "org.apache.spark.SecurityManagerSuite @ set security with * in acls", "org.apache.spark.SecurityManagerSuite @ set security with * in acls for groups", "org.apache.spark.SecurityManagerSuite @ security for groups default behavior", "org.apache.spark.SecurityManagerSuite @ missing secret authentication key", "org.apache.spark.SecurityManagerSuite @ secret authentication 
key", "org.apache.spark.SecurityManagerSuite @ use executor-specific secret file configuration.", "org.apache.spark.SecurityManagerSuite @ secret file must be defined in both driver and executor", "org.apache.spark.SecurityManagerSuite @ master yarn cannot use file mounted secrets", "org.apache.spark.SecurityManagerSuite @ master local cannot use file mounted secrets", "org.apache.spark.SecurityManagerSuite @ master local[*] cannot use file mounted secrets", "org.apache.spark.SecurityManagerSuite @ master local[1,2] cannot use file mounted secrets", "org.apache.spark.SecurityManagerSuite @ master mesos://localhost:8080 cannot use file mounted secrets", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'yarn'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local[*]'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local[1, 2]'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'k8s://127.0.0.1'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'k8s://127.0.1.1'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local-cluster[2, 1, 1024]'", "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'invalid'", "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", "org.apache.spark.serializer.JavaSerializerSuite @ JavaSerializer instances are serializable", "org.apache.spark.serializer.JavaSerializerSuite @ Deserialize object containing a primitive Class as attribute", "org.apache.spark.serializer.JavaSerializerSuite @ SPARK-36627: Deserialize object containing a proxy Class as attribute", "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: 
ByteBuffer.array -- UnsupportedOperationException", "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", "org.apache.spark.serializer.KryoSerializerSuite @ basic types", "org.apache.spark.serializer.KryoSerializerSuite @ pairs", "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", "org.apache.spark.serializer.KryoSerializerSuite @ ranges", "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = true, usePool = true", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = true, usePool = false", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = true, usePool = true", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = true, usePool = false", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = false, usePool = true", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = false, usePool = false", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = false, usePool = true", "org.apache.spark.serializer.KryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = 
false, usePool = false", "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", "org.apache.spark.serializer.SerializationDebuggerSuite @ primitives, strings, and nulls", "org.apache.spark.serializer.SerializationDebuggerSuite @ primitive arrays", "org.apache.spark.serializer.SerializationDebuggerSuite @ non-primitive arrays", "org.apache.spark.serializer.SerializationDebuggerSuite @ serializable object", "org.apache.spark.serializer.SerializationDebuggerSuite @ nested arrays", "org.apache.spark.serializer.SerializationDebuggerSuite @ nested objects", "org.apache.spark.serializer.SerializationDebuggerSuite @ cycles (should not loop forever)", "org.apache.spark.serializer.SerializationDebuggerSuite @ root object not serializable", "org.apache.spark.serializer.SerializationDebuggerSuite @ array containing not serializable element", "org.apache.spark.serializer.SerializationDebuggerSuite @ object containing not serializable field", "org.apache.spark.serializer.SerializationDebuggerSuite @ externalizable class writing out not serializable object", "org.apache.spark.serializer.SerializationDebuggerSuite @ externalizable class writing out serializable objects", "org.apache.spark.serializer.SerializationDebuggerSuite @ object containing writeReplace() which returns not serializable object", "org.apache.spark.serializer.SerializationDebuggerSuite @ object containing writeReplace() which returns serializable object", "org.apache.spark.serializer.SerializationDebuggerSuite @ no infinite loop with writeReplace() which returns class of its own type", "org.apache.spark.serializer.SerializationDebuggerSuite @ object containing writeObject() and not serializable field", "org.apache.spark.serializer.SerializationDebuggerSuite @ object containing writeObject() and serializable field", "org.apache.spark.serializer.SerializationDebuggerSuite @ object of serializable subclass with more fields than superclass (SPARK-7180)", "org.apache.spark.serializer.SerializationDebuggerSuite @ crazy nested objects", "org.apache.spark.serializer.SerializationDebuggerSuite @ improveException", "org.apache.spark.serializer.SerializationDebuggerSuite @ improveException with error in debugger", "org.apache.spark.serializer.SerializerPropertiesSuite @ JavaSerializer does not support relocation", "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", 
"org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = true, usePool = true", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = true, usePool = false", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = true, usePool = true", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = true, usePool = false", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = false, usePool = true", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = true, referenceTracking = false, usePool = false", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = false, usePool = true", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ instance reuse with autoReset = false, referenceTracking = false, usePool = false", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works 
correctly in multi-threaded environment", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error logging", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block push fails with too late exception", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push", "org.apache.spark.shuffle.ShuffleDependencySuite @ key, value, and combiner classes correct in shuffle dependency without aggregation", "org.apache.spark.shuffle.ShuffleDependencySuite @ key, value, and combiner classes available in shuffle dependency with aggregation", "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", 
"org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ commit shuffle files multiple times", "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ SPARK-33198 getMigrationBlocks should not fail at missing files", "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ getMergedBlockData should return expected FileSegmentManagedBuffer list", "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ getMergedBlockMeta should return expected MergedBlockMeta", "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ write checksum file", "org.apache.spark.shuffle.sort.io.LocalDiskShuffleMapOutputWriterSuite @ writing to an outputstream", "org.apache.spark.shuffle.sort.io.LocalDiskShuffleMapOutputWriterSuite @ writing to a channel", "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=true, aggregator=false, order=false)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=true, aggregator=true, order=false)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=true, aggregator=false, order=true)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=true, aggregator=true, order=true)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=false, aggregator=false, order=false)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=false, aggregator=true, order=false)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=false, aggregator=false, order=true)", "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write checksum file (spill=false, aggregator=true, order=true)", "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", "org.apache.spark.ShuffleNettySuite @ shuffle serializer", "org.apache.spark.ShuffleNettySuite @ zero sized blocks", "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", "org.apache.spark.ShuffleNettySuite @ metrics for shuffle 
with aggregation", "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", "org.apache.spark.SortShuffleSuite @ groupByKey without compression", "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", "org.apache.spark.SortShuffleSuite @ shuffle serializer", "org.apache.spark.SortShuffleSuite @ zero sized blocks", "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", "org.apache.spark.SortShuffleSuite @ 
SortShuffleManager properly cleans up files for shuffles that use the serialized path", "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", "org.apache.spark.SparkConfSuite @ Test byteString conversion", "org.apache.spark.SparkConfSuite @ Test timeString conversion", "org.apache.spark.SparkConfSuite @ loading from system properties", "org.apache.spark.SparkConfSuite @ initializing without loading defaults", "org.apache.spark.SparkConfSuite @ named set methods", "org.apache.spark.SparkConfSuite @ basic get and set", "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", "org.apache.spark.SparkConfSuite @ creating SparkContext without master", "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", "org.apache.spark.SparkConfSuite @ SparkContext property overriding", "org.apache.spark.SparkConfSuite @ nested property names", "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", "org.apache.spark.SparkConfSuite @ register kryo classes through conf", "org.apache.spark.SparkConfSuite @ deprecated configs", "org.apache.spark.SparkConfSuite @ SPARK-13727", "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", "org.apache.spark.SparkConfSuite @ encryption requires authentication", "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb 
with default throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", "org.apache.spark.SparkConfSuite @ get task resource requirement from config", "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", "org.apache.spark.SparkContextSchedulerCreationSuite @ local", "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", "org.apache.spark.SparkContextSuite @ Test getOrCreate", "org.apache.spark.SparkContextSuite @ BytesWritable implicit conversion is correct", "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", "org.apache.spark.SparkContextSuite @ add and list jar files", "org.apache.spark.SparkContextSuite @ add FS jar files not exists", "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", "org.apache.spark.SparkContextSuite @ addFile recursive works", "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", "org.apache.spark.SparkContextSuite @ addJar 
can be called twice with same file in non-local-mode (SPARK-16787)", "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", "org.apache.spark.SparkContextSuite @ add jar with invalid path", "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", "org.apache.spark.SparkContextSuite @ Cancelling job group should not cause SparkContext to shutdown (SPARK-6414)", "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", "org.apache.spark.SparkContextSuite @ No exception when both num-executors and dynamic allocation set.", "org.apache.spark.SparkContextSuite @ localProperties are inherited by spawned threads.", "org.apache.spark.SparkContextSuite @ localProperties do not cross-talk between threads.", "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", "org.apache.spark.SparkContextSuite @ Cancelling stages/jobs with custom reasons.", "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", "org.apache.spark.SparkContextSuite @ SPARK-19446: DebugFilesystem.assertNoOpenStreams should report open streams to help debugging", "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", 
"org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", "org.apache.spark.SSLOptionsSuite @ test resolving property file as spark conf", "org.apache.spark.SSLOptionsSuite @ test resolving property with defaults specified", "org.apache.spark.SSLOptionsSuite @ test whether defaults can be overridden", "org.apache.spark.SSLOptionsSuite @ variable substitution", "org.apache.spark.SSLOptionsSuite @ get password from Hadoop credential provider", "org.apache.spark.status.api.v1.ExecutorSummarySuite @ Check ExecutorSummary serialize and deserialize with empty peakMemoryMetrics", "org.apache.spark.status.api.v1.SimpleDateParamSuite @ date parsing", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", 
"org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should fail when the stage doesn't exist", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion time", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", 
"org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk leveldb)", "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", "org.apache.spark.status.AppStatusUtilsSuite @ schedulerDelay", "org.apache.spark.status.ElementTrackingStoreSuite @ asynchronous tracking single-fire", "org.apache.spark.status.ElementTrackingStoreSuite @ tracking for multiple types", "org.apache.spark.status.LiveEntitySuite @ partition seq", "org.apache.spark.status.LiveEntitySuite @ Only show few elements of CollectionAccumulator when converting to v1.AccumulableInfo", "org.apache.spark.StatusTrackerSuite @ basic status API usage", "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", "org.apache.spark.storage.BlockIdSuite @ test-bad-deserialization", "org.apache.spark.storage.BlockIdSuite @ rdd", "org.apache.spark.storage.BlockIdSuite @ shuffle", "org.apache.spark.storage.BlockIdSuite @ shuffle batch", "org.apache.spark.storage.BlockIdSuite @ shuffle data", "org.apache.spark.storage.BlockIdSuite @ shuffle index", "org.apache.spark.storage.BlockIdSuite @ shuffle merged data", "org.apache.spark.storage.BlockIdSuite @ shuffle merged index", "org.apache.spark.storage.BlockIdSuite @ shuffle merged meta", "org.apache.spark.storage.BlockIdSuite @ shuffle 
merged block", "org.apache.spark.storage.BlockIdSuite @ broadcast", "org.apache.spark.storage.BlockIdSuite @ taskresult", "org.apache.spark.storage.BlockIdSuite @ stream", "org.apache.spark.storage.BlockIdSuite @ temp local", "org.apache.spark.storage.BlockIdSuite @ temp shuffle", "org.apache.spark.storage.BlockIdSuite @ test", "org.apache.spark.storage.BlockIdSuite @ merged shuffle id", "org.apache.spark.storage.BlockIdSuite @ shuffle chunk", "org.apache.spark.storage.BlockInfoManagerSuite @ initial memory usage", "org.apache.spark.storage.BlockInfoManagerSuite @ get non-existent block", "org.apache.spark.storage.BlockInfoManagerSuite @ basic lockNewBlockForWriting", "org.apache.spark.storage.BlockInfoManagerSuite @ lockNewBlockForWriting blocks while write lock is held, then returns false after release", "org.apache.spark.storage.BlockInfoManagerSuite @ lockNewBlockForWriting blocks while write lock is held, then returns true after removal", "org.apache.spark.storage.BlockInfoManagerSuite @ read locks are reentrant", "org.apache.spark.storage.BlockInfoManagerSuite @ multiple tasks can hold read locks", "org.apache.spark.storage.BlockInfoManagerSuite @ single task can hold write lock", "org.apache.spark.storage.BlockInfoManagerSuite @ cannot grab a writer lock while already holding a write lock", "org.apache.spark.storage.BlockInfoManagerSuite @ assertBlockIsLockedForWriting throws exception if block is not locked", "org.apache.spark.storage.BlockInfoManagerSuite @ downgrade lock", "org.apache.spark.storage.BlockInfoManagerSuite @ write lock will block readers", "org.apache.spark.storage.BlockInfoManagerSuite @ read locks will block writer", "org.apache.spark.storage.BlockInfoManagerSuite @ removing a non-existent block throws SparkException", "org.apache.spark.storage.BlockInfoManagerSuite @ removing a block without holding any locks throws IllegalStateException", "org.apache.spark.storage.BlockInfoManagerSuite @ removing a block while holding only a read lock throws IllegalStateException", "org.apache.spark.storage.BlockInfoManagerSuite @ removing a block causes blocked callers to receive None", "org.apache.spark.storage.BlockInfoManagerSuite @ releaseAllLocksForTask releases write locks", "org.apache.spark.storage.BlockInfoManagerSuite @ SPARK-38675 - concurrent unlock and releaseAllLocksForTask calls should not fail", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no migrations configured", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ SPARK-40168: block decom manager handles shuffle file not found", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", "org.apache.spark.storage.BlockManagerInfoSuite @ broadcast block externalShuffleServiceEnabled=true", "org.apache.spark.storage.BlockManagerInfoSuite @ broadcast block externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerInfoSuite @ RDD block with MEMORY_ONLY externalShuffleServiceEnabled=true", "org.apache.spark.storage.BlockManagerInfoSuite @ RDD block with MEMORY_ONLY externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerInfoSuite @ RDD block with MEMORY_AND_DISK externalShuffleServiceEnabled=true", "org.apache.spark.storage.BlockManagerInfoSuite @ RDD block with MEMORY_AND_DISK externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerInfoSuite @ RDD block with DISK_ONLY externalShuffleServiceEnabled=true", "org.apache.spark.storage.BlockManagerInfoSuite @ RDD block with DISK_ONLY externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerInfoSuite @ update from MEMORY_ONLY to DISK_ONLY externalShuffleServiceEnabled=true", "org.apache.spark.storage.BlockManagerInfoSuite @ update from MEMORY_ONLY to DISK_ONLY externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerInfoSuite @ using invalid StorageLevel externalShuffleServiceEnabled=true", 
"org.apache.spark.storage.BlockManagerInfoSuite @ using invalid StorageLevel externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerInfoSuite @ remove block and add another one externalShuffleServiceEnabled=true", "org.apache.spark.storage.BlockManagerInfoSuite @ remove block and add another one externalShuffleServiceEnabled=false", "org.apache.spark.storage.BlockManagerMasterSuite @ SPARK-31422: getMemoryStatus should not fail after BlockManagerMaster stops", "org.apache.spark.storage.BlockManagerMasterSuite @ SPARK-31422: getStorageStatus should not fail after BlockManagerMaster stops", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition 
and deletion of block managers", "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", "org.apache.spark.storage.BlockManagerSuite @ removing block", "org.apache.spark.storage.BlockManagerSuite @ removing rdd", "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: avoid the network when block requested from same host, StorageLevel(disk, 1 replicas)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: avoid the network when block requested from same host, StorageLevel(disk, deserialized, 1 replicas)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: avoid the network when block requested from same host, StorageLevel(disk, deserialized, 2 replicas)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: as file is removed fall back to network fetch, StorageLevel(disk, 1 replicas), getRemoteValue()", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: as file is removed fall back to network fetch, StorageLevel(disk, 1 replicas), getRemoteBytes()", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: as file is removed fall back to network fetch, StorageLevel(disk, deserialized, 1 replicas), getRemoteValue()", "org.apache.spark.storage.BlockManagerSuite @ SPARK-27622: as file is removed fall back to network fetch, StorageLevel(disk, deserialized, 1 replicas), getRemoteBytes()", "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", 
"org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", "org.apache.spark.storage.BlockManagerSuite @ overly large block", "org.apache.spark.storage.BlockManagerSuite @ block compression", "org.apache.spark.storage.BlockManagerSuite @ block store put failure", "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching, serialized (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching, serialized (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", "org.apache.spark.storage.BlockManagerSuite @ query block statuses", "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due 
to missing DiskStore files (SPARK-15736)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", "org.apache.spark.storage.DiskBlockManagerSuite @ basic block creation", "org.apache.spark.storage.DiskBlockManagerSuite @ enumerating blocks", "org.apache.spark.storage.DiskBlockManagerSuite @ SPARK-22227: non-block files are skipped", "org.apache.spark.storage.DiskBlockManagerSuite @ should still create merge directories if one already exists under a local dir", "org.apache.spark.storage.DiskBlockManagerSuite @ Test dir creation with permission 770", "org.apache.spark.storage.DiskBlockManagerSuite @ Encode merged directory name and attemptId in shuffleManager field", 
"org.apache.spark.storage.DiskBlockManagerSuite @ SPARK-37618: Sub dirs are group writable when removing from shuffle service enabled", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", "org.apache.spark.storage.DiskStoreSuite @ reads of memory-mapped and non memory-mapped files are equivalent", "org.apache.spark.storage.DiskStoreSuite @ block size tracking", "org.apache.spark.storage.DiskStoreSuite @ blocks larger than 2gb", "org.apache.spark.storage.DiskStoreSuite @ block data encryption", "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", "org.apache.spark.storage.FallbackStorageSuite @ SPARK-34142: fallback storage API - cleanUp", "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", "org.apache.spark.storage.LocalDirsSuite @ Utils.getLocalDir() returns a valid directory, even if some local dirs are missing", "org.apache.spark.storage.LocalDirsSuite @ SPARK_LOCAL_DIRS override also affects driver", "org.apache.spark.storage.LocalDirsSuite @ Utils.getLocalDir() throws an exception if any temporary directory cannot be retrieved", "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks 
through putIteratorAsBytes", "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with 
finishWritingToStream() and numBuffered = 1000", "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", "org.apache.spark.storage.PartiallyUnrolledIteratorSuite @ join two iterators", "org.apache.spark.storage.RandomBlockReplicationPolicyBehavior @ block replication - random block replication policy", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-36206: diagnose the block when it's corrupted twice", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-36206: diagnose the block when it's corrupted inside BufferReleasingInputStream", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ successful 3 local + 4 host local + 2 remote reads", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ error during accessing host local dirs for executors", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ Hit maxBytesInFlight limitation before maxBlocksInFlightPerAddress", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ Hit maxBlocksInFlightPerAddress limitation before maxBytesInFlight", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ fetch continuous blocks in batch successful 3 local + 4 host local + 2 remote reads", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ fetch continuous blocks in batch should respect maxBytesInFlight", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-35910: Update remoteBlockBytes based on merged fetch request", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ fetch continuous blocks in batch should respect maxBlocksInFlightPerAddress", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ release current unexhausted buffer in case the task completes early", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ fail all blocks if any of the remote request fails", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ retry corrupt blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ big blocks are also checked for corruption", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ ensure big blocks available as a concatenated stream can be read", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ retry corrupt blocks (disabled)", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ Blocks should be shuffled to disk when size of the request is above the threshold(maxReqSizeShuffleToMem).", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ fail zero-size blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-31521: correct the fetch size when merging blocks into a merged block", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-27991: defer shuffle fetch request (one block) on Netty OOM", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-27991: defer shuffle fetch request (multiple blocks) on Netty OOM, oomBlockIndex=0", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-27991: defer shuffle fetch request (multiple blocks) on Netty OOM, oomBlockIndex=1", 
"org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-27991: defer shuffle fetch request (multiple blocks) on Netty OOM, oomBlockIndex=2", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-27991: block shouldn't retry endlessly on Netty OOM", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: fetch remote push-merged block meta", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failed to fetch remote push-merged block meta so fallback to original blocks.", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: iterator has just 1 push-merged block and fails to fetch the meta", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch push-merged-local meta should fallback to fetch original shuffle blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to reading chunkBitmaps of push-merged-local meta should fallback to original shuffle blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch push-merged-local data should fallback to fetch original shuffle blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch push-merged-local meta of a single merged block should not drop the fetch of other push-merged-local blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch push-merged block as well as fallback block should throw a FetchFailedException", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch push-merged-local block should fallback to fetch original shuffle blocks which contain host-local blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: fetch host local blocks with push-merged block during initialization and fallback to host locals blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure while reading local shuffle chunks should fallback to original shuffle blocks", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: fallback to original shuffle block when a push-merged shuffle chunk is corrupt", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: fallback to original blocks when failed to fetch remote shuffle chunk", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: fallback to original blocks when failed to parse remote merged block meta", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch a remote shuffle chunk initiates the fallback of pending shuffle chunks immediately", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-32922: failure to fetch a remote shuffle chunk initiates the fallback of pending shuffle chunks immediately which got deferred", "org.apache.spark.storage.ShuffleBlockFetcherIteratorSuite @ SPARK-38987: failure to fetch corrupted shuffle block chunk should throw a FetchFailedException when early detection is unable to catch corruption", "org.apache.spark.storage.StorageSuite @ storage status add non-RDD blocks", "org.apache.spark.storage.StorageSuite @ storage status add RDD blocks", "org.apache.spark.storage.StorageSuite @ storage status getBlock", "org.apache.spark.storage.StorageSuite @ storage status memUsed, diskUsed, externalBlockStoreUsed", "org.apache.spark.storage.StorageSuite @ storage memUsed, diskUsed with on-heap and off-heap blocks", 
"org.apache.spark.storage.StorageSuite @ old SparkListenerBlockManagerAdded event compatible", "org.apache.spark.storage.TopologyAwareBlockReplicationPolicyBehavior @ block replication - random block replication policy", "org.apache.spark.storage.TopologyAwareBlockReplicationPolicyBehavior @ All peers in the same rack", "org.apache.spark.storage.TopologyAwareBlockReplicationPolicyBehavior @ Peers in 2 racks", "org.apache.spark.storage.TopologyMapperSuite @ File based Topology Mapper", "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", "org.apache.spark.ThreadingSuite @ parallel job execution", "org.apache.spark.ThreadingSuite @ set local properties in different thread", "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", "org.apache.spark.ui.HttpSecurityFilterSuite @ filter bad user input", "org.apache.spark.ui.HttpSecurityFilterSuite @ perform access control", "org.apache.spark.ui.HttpSecurityFilterSuite @ set security-related headers", "org.apache.spark.ui.HttpSecurityFilterSuite @ doAs impersonation", "org.apache.spark.ui.PagedDataSourceSuite @ basic", "org.apache.spark.ui.PagedTableSuite @ pageNavigation", "org.apache.spark.ui.PagedTableSuite @ pageNavigation with different id", "org.apache.spark.ui.scope.RDDOperationGraphSuite @ Test simple cluster equals", "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", "org.apache.spark.ui.storage.StoragePageSuite @ rddTable", "org.apache.spark.ui.storage.StoragePageSuite @ empty rddTable", "org.apache.spark.ui.storage.StoragePageSuite @ streamBlockStorageLevelDescriptionAndSize", "org.apache.spark.ui.storage.StoragePageSuite @ receiverBlockTables", "org.apache.spark.ui.storage.StoragePageSuite @ empty receiverBlockTables", "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", "org.apache.spark.ui.UISeleniumSuite @ live 
UI json application list", "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", "org.apache.spark.ui.UISuite @ basic ui visibility", "org.apache.spark.ui.UISuite @ visibility at localhost:4040", "org.apache.spark.ui.UISuite @ jetty selects different port under contention", "org.apache.spark.ui.UISuite @ jetty with https selects different port under contention", "org.apache.spark.ui.UISuite @ jetty binds to port 0 correctly", "org.apache.spark.ui.UISuite @ jetty with https binds to port 0 correctly", "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", "org.apache.spark.ui.UISuite @ verify webUrl contains the port", "org.apache.spark.ui.UISuite @ verify proxy rewrittenURI", "org.apache.spark.ui.UISuite @ SPARK-33611: Avoid encoding twice on the query parameter of proxy rewrittenURI", "org.apache.spark.ui.UISuite @ verify rewriting location header for reverse proxy", "org.apache.spark.ui.UISuite @ add and remove handlers with custom user filter", "org.apache.spark.ui.UISuite @ SPARK-32467: Avoid encoding URL twice on https redirect", "org.apache.spark.ui.UISuite @ http -> https redirect applies to all URIs", "org.apache.spark.ui.UISuite @ specify both http and https ports separately", "org.apache.spark.ui.UISuite @ redirect with proxy server support", "org.apache.spark.ui.UISuite @ SPARK-34449: Jetty 9.4.35.v20201120 and later no longer return status code 302 and handle internally when request URL ends with a context path without trailing '/'", "org.apache.spark.ui.UISuite @ SPARK-34449: default thread pool size of different jetty servers", "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", "org.apache.spark.ui.UIUtilsSuite @ makeDescription(plainText = false)", "org.apache.spark.ui.UIUtilsSuite @ makeDescription(plainText = true)", "org.apache.spark.ui.UIUtilsSuite @ SPARK-11906: Progress bar should not overflow because of speculative tasks", "org.apache.spark.ui.UIUtilsSuite @ decodeURLParameter (SPARK-12708: Sorting task error in Stages Page when yarn mode.)", "org.apache.spark.ui.UIUtilsSuite @ listingTable with tooltips", "org.apache.spark.ui.UIUtilsSuite @ listingTable without tooltips", "org.apache.spark.UnpersistSuite @ unpersist RDD", "org.apache.spark.util.AccumulatorV2Suite @ LongAccumulator add/avg/sum/count/isZero", "org.apache.spark.util.AccumulatorV2Suite @ DoubleAccumulator add/avg/sum/count/isZero", "org.apache.spark.util.AccumulatorV2Suite @ ListAccumulator", "org.apache.spark.util.BoundedPriorityQueueSuite @ BoundedPriorityQueue poll test", "org.apache.spark.util.CausedBySuite @ For an error without a cause, should return the error", "org.apache.spark.util.CausedBySuite @ For an error with a cause, should return the cause of the error", "org.apache.spark.util.CausedBySuite @ For an error with a cause that itself has a cause, return the root cause", "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", "org.apache.spark.util.ClosureCleanerSuite @ closures that 
don't use fields of the outer class", "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", "org.apache.spark.util.ClosureCleanerSuite @ createNullValue", "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", "org.apache.spark.util.collection.AppendOnlyMapSuite @ initialization", "org.apache.spark.util.collection.AppendOnlyMapSuite @ object keys and values", "org.apache.spark.util.collection.AppendOnlyMapSuite @ primitive keys and values", "org.apache.spark.util.collection.AppendOnlyMapSuite @ null keys", "org.apache.spark.util.collection.AppendOnlyMapSuite @ null values", "org.apache.spark.util.collection.AppendOnlyMapSuite @ changeValue", "org.apache.spark.util.collection.AppendOnlyMapSuite @ inserting in capacity-1 map", "org.apache.spark.util.collection.AppendOnlyMapSuite @ destructive sort", "org.apache.spark.util.collection.BitSetSuite @ basic set and get", "org.apache.spark.util.collection.BitSetSuite @ 100% full bit set", "org.apache.spark.util.collection.BitSetSuite @ nextSetBit", "org.apache.spark.util.collection.BitSetSuite @ xor len(bitsetX) < len(bitsetY)", "org.apache.spark.util.collection.BitSetSuite @ xor len(bitsetX) > len(bitsetY)", "org.apache.spark.util.collection.BitSetSuite @ andNot len(bitsetX) < len(bitsetY)", "org.apache.spark.util.collection.BitSetSuite @ andNot len(bitsetX) > len(bitsetY)", "org.apache.spark.util.collection.BitSetSuite @ [gs]etUntil", "org.apache.spark.util.collection.CompactBufferSuite @ empty buffer", "org.apache.spark.util.collection.CompactBufferSuite @ basic inserts", "org.apache.spark.util.collection.CompactBufferSuite @ adding sequences", "org.apache.spark.util.collection.CompactBufferSuite @ adding the same buffer to itself", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ 
ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sorting, no partial aggregation with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sorting, no partial aggregation with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sorting, no partial aggregation with spilling with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sorting, no partial aggregation with spilling with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation, no sorting with 
kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation, no sorting with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation, no sorting with spilling with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation, no sorting with spilling with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking timsort contracts for large arrays", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", "org.apache.spark.util.collection.ImmutableBitSetSuite @ basic get", "org.apache.spark.util.collection.ImmutableBitSetSuite @ nextSetBit", "org.apache.spark.util.collection.ImmutableBitSetSuite @ xor len(bitsetX) < len(bitsetY)", "org.apache.spark.util.collection.ImmutableBitSetSuite @ xor len(bitsetX) > len(bitsetY)", "org.apache.spark.util.collection.ImmutableBitSetSuite @ andNot len(bitsetX) < len(bitsetY)", "org.apache.spark.util.collection.ImmutableBitSetSuite @ andNot len(bitsetX) > len(bitsetY)", "org.apache.spark.util.collection.ImmutableBitSetSuite @ immutability", "org.apache.spark.util.collection.MedianHeapSuite @ If no numbers in MedianHeap, NoSuchElementException is thrown.", "org.apache.spark.util.collection.MedianHeapSuite @ Median should be correct when size of MedianHeap is even", "org.apache.spark.util.collection.MedianHeapSuite @ Median should be correct when size of MedianHeap is odd", "org.apache.spark.util.collection.MedianHeapSuite @ Median should be correct though there are duplicated numbers inside.", "org.apache.spark.util.collection.MedianHeapSuite @ Median should be correct when input data is skewed.", "org.apache.spark.util.collection.OpenHashMapSuite @ size for specialized, primitive value (int)", "org.apache.spark.util.collection.OpenHashMapSuite @ initialization", "org.apache.spark.util.collection.OpenHashMapSuite @ primitive value", "org.apache.spark.util.collection.OpenHashMapSuite @ non-primitive value", "org.apache.spark.util.collection.OpenHashMapSuite @ null keys", "org.apache.spark.util.collection.OpenHashMapSuite @ null values", "org.apache.spark.util.collection.OpenHashMapSuite @ changeValue", "org.apache.spark.util.collection.OpenHashMapSuite @ inserting in capacity-1 map", "org.apache.spark.util.collection.OpenHashMapSuite @ contains", 
"org.apache.spark.util.collection.OpenHashMapSuite @ distinguish between the 0/0.0/0L and null", "org.apache.spark.util.collection.OpenHashSetSuite @ size for specialized, primitive int", "org.apache.spark.util.collection.OpenHashSetSuite @ primitive int", "org.apache.spark.util.collection.OpenHashSetSuite @ primitive long", "org.apache.spark.util.collection.OpenHashSetSuite @ primitive float", "org.apache.spark.util.collection.OpenHashSetSuite @ primitive double", "org.apache.spark.util.collection.OpenHashSetSuite @ non-primitive", "org.apache.spark.util.collection.OpenHashSetSuite @ non-primitive set growth", "org.apache.spark.util.collection.OpenHashSetSuite @ primitive set growth", "org.apache.spark.util.collection.OpenHashSetSuite @ SPARK-18200 Support zero as an initial set size", "org.apache.spark.util.collection.OpenHashSetSuite @ support for more than 12M items", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ size for specialized, primitive key, value (int, int)", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ initialization", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ basic operations", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ null values", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ changeValue", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ inserting in capacity-1 map", "org.apache.spark.util.collection.PrimitiveKeyOpenHashMapSuite @ contains", "org.apache.spark.util.collection.PrimitiveVectorSuite @ primitive value", "org.apache.spark.util.collection.PrimitiveVectorSuite @ non-primitive value", "org.apache.spark.util.collection.PrimitiveVectorSuite @ ideal growth", "org.apache.spark.util.collection.PrimitiveVectorSuite @ ideal size", "org.apache.spark.util.collection.PrimitiveVectorSuite @ resizing", "org.apache.spark.util.collection.SizeTrackerSuite @ vector fixed size insertions", "org.apache.spark.util.collection.SizeTrackerSuite @ vector variable size insertions", "org.apache.spark.util.collection.SizeTrackerSuite @ map fixed size insertions", "org.apache.spark.util.collection.SizeTrackerSuite @ map variable size insertions", "org.apache.spark.util.collection.SizeTrackerSuite @ map updates", "org.apache.spark.util.collection.SorterSuite @ equivalent to Arrays.sort", "org.apache.spark.util.collection.SorterSuite @ KVArraySorter", "org.apache.spark.util.collection.SorterSuite @ SPARK-5984 TimSort bug", "org.apache.spark.util.collection.SorterSuite @ java.lang.ArrayIndexOutOfBoundsException in TimSort", "org.apache.spark.util.collection.SorterSuite @ Sorter benchmark for key-value pairs", "org.apache.spark.util.collection.SorterSuite @ Sorter benchmark for primitive int array", "org.apache.spark.util.collection.unsafe.sort.PrefixComparatorsSuite @ String prefix comparator", "org.apache.spark.util.collection.unsafe.sort.PrefixComparatorsSuite @ Binary prefix comparator", "org.apache.spark.util.collection.unsafe.sort.PrefixComparatorsSuite @ double prefix comparator handles NaNs properly", "org.apache.spark.util.collection.unsafe.sort.PrefixComparatorsSuite @ double prefix comparator handles negative NaNs properly", "org.apache.spark.util.collection.unsafe.sort.PrefixComparatorsSuite @ double prefix comparator handles other special values properly", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for unsigned binary data asc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort unsigned binary data asc 
nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix unsigned binary data asc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test unsigned binary data asc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix unsigned binary data asc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for unsigned binary data asc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort unsigned binary data asc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix unsigned binary data asc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test unsigned binary data asc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix unsigned binary data asc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for unsigned binary data desc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort unsigned binary data desc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix unsigned binary data desc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test unsigned binary data desc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix unsigned binary data desc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for unsigned binary data desc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort unsigned binary data desc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix unsigned binary data desc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test unsigned binary data desc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix unsigned binary data desc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for twos complement asc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort twos complement asc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix twos complement asc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test twos complement asc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix twos complement asc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for twos complement asc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort twos complement asc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix twos complement asc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test twos complement asc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix twos complement asc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for twos 
complement desc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort twos complement desc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix twos complement desc nulls last", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test twos complement desc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix twos complement desc nulls last with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for twos complement desc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort twos complement desc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix twos complement desc nulls first", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test twos complement desc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix twos complement desc nulls first with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ radix support for binary data partial", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort binary data partial", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ sort key prefix binary data partial", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test binary data partial with random bitmasks", "org.apache.spark.util.collection.unsafe.sort.RadixSortSuite @ fuzz test key prefix binary data partial with random bitmasks", "org.apache.spark.util.CompletionIteratorSuite @ basic test", "org.apache.spark.util.CompletionIteratorSuite @ reference to sub iterator should not be available after completion", "org.apache.spark.util.DependencyUtilsSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid ivy uri", "org.apache.spark.util.DistributionSuite @ summary", "org.apache.spark.util.EventLoopSuite @ EventLoop", "org.apache.spark.util.EventLoopSuite @ EventLoop: start and stop", "org.apache.spark.util.EventLoopSuite @ EventLoop: onError", "org.apache.spark.util.EventLoopSuite @ EventLoop: error thrown from onError should not crash the event thread", "org.apache.spark.util.EventLoopSuite @ EventLoop: calling stop multiple times should only call onStop once", "org.apache.spark.util.EventLoopSuite @ EventLoop: post event in multiple threads", "org.apache.spark.util.EventLoopSuite @ EventLoop: onReceive swallows InterruptException", "org.apache.spark.util.EventLoopSuite @ EventLoop: stop in eventThread", "org.apache.spark.util.EventLoopSuite @ EventLoop: stop() in onStart should call onStop", "org.apache.spark.util.EventLoopSuite @ EventLoop: stop() in onReceive should call onStop", "org.apache.spark.util.EventLoopSuite @ EventLoop: stop() in onError should call onStop", "org.apache.spark.util.FileAppenderSuite @ basic file appender", "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: basic file appender - close stream", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling (compressed)", "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - time-based rolling close stream", "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", "org.apache.spark.util.FileAppenderSuite @ rolling file 
appender - size-based rolling", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling (compressed)", "org.apache.spark.util.FileAppenderSuite @ rolling file appender - cleaning", "org.apache.spark.util.FileAppenderSuite @ file appender selection", "org.apache.spark.util.FileAppenderSuite @ file appender async close stream abruptly", "org.apache.spark.util.FileAppenderSuite @ file appender async close stream gracefully", "org.apache.spark.util.HadoopFSUtilsSuite @ HadoopFSUtils - file filtering", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ empty output", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ write a single byte", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ write a single near boundary", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ write a single at boundary", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ single chunk output", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ single chunk output at boundary size", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ multiple chunk output", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ multiple chunk output at boundary size", "org.apache.spark.util.io.ChunkedByteBufferOutputStreamSuite @ SPARK-36464: size returns correct positive number even with over 2GB data", "org.apache.spark.util.JsonProtocolSuite @ SparkListenerEvent", "org.apache.spark.util.JsonProtocolSuite @ Dependent Classes", "org.apache.spark.util.JsonProtocolSuite @ ExceptionFailure backward compatibility: full stack trace", "org.apache.spark.util.JsonProtocolSuite @ StageInfo backward compatibility (details, accumulables)", "org.apache.spark.util.JsonProtocolSuite @ StageInfo resourceProfileId", "org.apache.spark.util.JsonProtocolSuite @ InputMetrics backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ Input/Output records backwards compatibility", "org.apache.spark.util.JsonProtocolSuite @ Shuffle Read/Write records backwards compatibility", "org.apache.spark.util.JsonProtocolSuite @ OutputMetrics backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ TaskMetrics backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ StorageLevel backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ BlockManager events backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ FetchFailed backwards compatibility", "org.apache.spark.util.JsonProtocolSuite @ SPARK-32124: FetchFailed Map Index backwards compatibility", "org.apache.spark.util.JsonProtocolSuite @ ShuffleReadMetrics: Local bytes read backwards compatibility", "org.apache.spark.util.JsonProtocolSuite @ SparkListenerApplicationStart backwards compatibility", "org.apache.spark.util.JsonProtocolSuite @ ExecutorLostFailure backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ SparkListenerJobStart backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ SparkListenerJobStart and SparkListenerJobEnd backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ RDDInfo backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ StageInfo backward compatibility (parent IDs)", "org.apache.spark.util.JsonProtocolSuite @ TaskCommitDenied backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ AccumulableInfo backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ ExceptionFailure backward compatibility: accumulator updates", 
"org.apache.spark.util.JsonProtocolSuite @ TaskKilled backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ ExecutorMetricsUpdate backward compatibility: executor metrics update", "org.apache.spark.util.JsonProtocolSuite @ executorMetricsFromJson backward compatibility: handle missing metrics", "org.apache.spark.util.JsonProtocolSuite @ EnvironmentUpdate backward compatibility: handle missing metrics properties", "org.apache.spark.util.JsonProtocolSuite @ ExecutorInfo backward compatibility", "org.apache.spark.util.JsonProtocolSuite @ TaskInfo backward compatibility: handle missing partition ID field", "org.apache.spark.util.JsonProtocolSuite @ AccumulableInfo value de/serialization", "org.apache.spark.util.JsonProtocolSuite @ SPARK-31923: unexpected value type of internal accumulator", "org.apache.spark.util.JsonProtocolSuite @ SPARK-30936: forwards compatibility - ignore unknown fields", "org.apache.spark.util.JsonProtocolSuite @ SPARK-30936: backwards compatibility - set default values for missing fields", "org.apache.spark.util.KeyLockSuite @ The same key should wait when its lock is held", "org.apache.spark.util.KeyLockSuite @ A different key should not be locked", "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", "org.apache.spark.util.MutableURLClassLoaderSuite @ child first", "org.apache.spark.util.MutableURLClassLoaderSuite @ parent first", "org.apache.spark.util.MutableURLClassLoaderSuite @ child first can fall back", "org.apache.spark.util.MutableURLClassLoaderSuite @ child first can fail", "org.apache.spark.util.MutableURLClassLoaderSuite @ default JDK classloader get resources", "org.apache.spark.util.MutableURLClassLoaderSuite @ parent first get resources", "org.apache.spark.util.MutableURLClassLoaderSuite @ child first get resources", "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", "org.apache.spark.util.NextIteratorSuite @ one iteration", "org.apache.spark.util.NextIteratorSuite @ two iterations", "org.apache.spark.util.NextIteratorSuite @ empty iteration", "org.apache.spark.util.NextIteratorSuite @ close is called once for empty iterations", "org.apache.spark.util.NextIteratorSuite @ close is called once for non-empty iterations", "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", "org.apache.spark.util.random.RandomSamplerSuite @ utilities", "org.apache.spark.util.random.RandomSamplerSuite @ sanity check medianKSD against references", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli sampling", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli sampling without iterator", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli sampling with gap sampling optimization", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli sampling (without iterator) with gap sampling optimization", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli boundary cases", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli (without iterator) boundary cases", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli data types", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli clone", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli set seed", "org.apache.spark.util.random.RandomSamplerSuite @ replacement sampling", "org.apache.spark.util.random.RandomSamplerSuite @ replacement sampling without iterator", 
"org.apache.spark.util.random.RandomSamplerSuite @ replacement sampling with gap sampling", "org.apache.spark.util.random.RandomSamplerSuite @ replacement sampling (without iterator) with gap sampling", "org.apache.spark.util.random.RandomSamplerSuite @ replacement boundary cases", "org.apache.spark.util.random.RandomSamplerSuite @ replacement (without) boundary cases", "org.apache.spark.util.random.RandomSamplerSuite @ replacement data types", "org.apache.spark.util.random.RandomSamplerSuite @ replacement clone", "org.apache.spark.util.random.RandomSamplerSuite @ replacement set seed", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli partitioning sampling", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli partitioning sampling without iterator", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli partitioning boundary cases", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli partitioning (without iterator) boundary cases", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli partitioning data", "org.apache.spark.util.random.RandomSamplerSuite @ bernoulli partitioning clone", "org.apache.spark.util.random.SamplingUtilsSuite @ reservoirSampleAndCount", "org.apache.spark.util.random.SamplingUtilsSuite @ SPARK-18678 reservoirSampleAndCount with tiny input", "org.apache.spark.util.random.SamplingUtilsSuite @ computeFraction", "org.apache.spark.util.random.XORShiftRandomSuite @ XORShift generates valid random numbers", "org.apache.spark.util.random.XORShiftRandomSuite @ XORShift with zero seed", "org.apache.spark.util.random.XORShiftRandomSuite @ hashSeed has random bits throughout", "org.apache.spark.util.SizeEstimatorSuite @ simple classes", "org.apache.spark.util.SizeEstimatorSuite @ primitive wrapper objects", "org.apache.spark.util.SizeEstimatorSuite @ class field blocks rounding", "org.apache.spark.util.SizeEstimatorSuite @ strings", "org.apache.spark.util.SizeEstimatorSuite @ primitive arrays", "org.apache.spark.util.SizeEstimatorSuite @ object arrays", "org.apache.spark.util.SizeEstimatorSuite @ 32-bit arch", "org.apache.spark.util.SizeEstimatorSuite @ 64-bit arch with no compressed oops", "org.apache.spark.util.SizeEstimatorSuite @ class field blocks rounding on 64-bit VM without useCompressedOops", "org.apache.spark.util.SizeEstimatorSuite @ check 64-bit detection for s390x arch", "org.apache.spark.util.SizeEstimatorSuite @ SizeEstimation can provide the estimated size", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught RuntimeException, exitOnUncaughtException = true", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught RuntimeException, exitOnUncaughtException = false", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught OutOfMemoryError, exitOnUncaughtException = true", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught OutOfMemoryError, exitOnUncaughtException = false", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught SparkFatalException(RuntimeException), exitOnUncaughtException = true", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught SparkFatalException(RuntimeException), exitOnUncaughtException = false", "org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught SparkFatalException(OutOfMemoryError), exitOnUncaughtException = true", 
"org.apache.spark.util.SparkUncaughtExceptionHandlerSuite @ SPARK-30310: Test uncaught SparkFatalException(OutOfMemoryError), exitOnUncaughtException = false", "org.apache.spark.util.ThreadUtilsSuite @ newDaemonSingleThreadExecutor", "org.apache.spark.util.ThreadUtilsSuite @ newDaemonSingleThreadScheduledExecutor", "org.apache.spark.util.ThreadUtilsSuite @ newDaemonCachedThreadPool", "org.apache.spark.util.ThreadUtilsSuite @ sameThread", "org.apache.spark.util.ThreadUtilsSuite @ runInNewThread", "org.apache.spark.util.ThreadUtilsSuite @ parmap should be interruptible", "org.apache.spark.util.TimeStampedHashMapSuite @ HashMap - basic test", "org.apache.spark.util.TimeStampedHashMapSuite @ TimeStampedHashMap - basic test", "org.apache.spark.util.TimeStampedHashMapSuite @ TimeStampedHashMap - threading safety test", "org.apache.spark.util.TimeStampedHashMapSuite @ TimeStampedHashMap - clearing by timestamp", "org.apache.spark.util.UninterruptibleThreadRunnerSuite @ runUninterruptibly should switch to UninterruptibleThread", "org.apache.spark.util.UninterruptibleThreadRunnerSuite @ runUninterruptibly should not add new UninterruptibleThread", "org.apache.spark.util.UninterruptibleThreadSuite @ interrupt when runUninterruptibly is running", "org.apache.spark.util.UninterruptibleThreadSuite @ interrupt before runUninterruptibly runs", "org.apache.spark.util.UninterruptibleThreadSuite @ nested runUninterruptibly", "org.apache.spark.util.UninterruptibleThreadSuite @ stress test", "org.apache.spark.util.UtilsSuite @ timeConversion", "org.apache.spark.util.UtilsSuite @ Test byteString conversion", "org.apache.spark.util.UtilsSuite @ bytesToString", "org.apache.spark.util.UtilsSuite @ copyStream", "org.apache.spark.util.UtilsSuite @ copyStreamUpTo", "org.apache.spark.util.UtilsSuite @ memoryStringToMb", "org.apache.spark.util.UtilsSuite @ splitCommandString", "org.apache.spark.util.UtilsSuite @ string formatting of time durations", "org.apache.spark.util.UtilsSuite @ reading offset bytes of a file", "org.apache.spark.util.UtilsSuite @ reading offset bytes of a file (compressed)", "org.apache.spark.util.UtilsSuite @ reading offset bytes across multiple files", "org.apache.spark.util.UtilsSuite @ reading offset bytes across multiple files (compressed)", "org.apache.spark.util.UtilsSuite @ deserialize long value", "org.apache.spark.util.UtilsSuite @ writeByteBuffer should not change ByteBuffer position", "org.apache.spark.util.UtilsSuite @ get iterator size", "org.apache.spark.util.UtilsSuite @ getIteratorZipWithIndex", "org.apache.spark.util.UtilsSuite @ SPARK-35907: createDirectory", "org.apache.spark.util.UtilsSuite @ doesDirectoryContainFilesNewerThan", "org.apache.spark.util.UtilsSuite @ resolveURI", "org.apache.spark.util.UtilsSuite @ resolveURIs with multiple paths", "org.apache.spark.util.UtilsSuite @ nonLocalPaths", "org.apache.spark.util.UtilsSuite @ isBindCollision", "org.apache.spark.util.UtilsSuite @ log4j log level change", "org.apache.spark.util.UtilsSuite @ deleteRecursively", "org.apache.spark.util.UtilsSuite @ loading properties from file", "org.apache.spark.util.UtilsSuite @ timeIt with prepare", "org.apache.spark.util.UtilsSuite @ fetch hcfs dir", "org.apache.spark.util.UtilsSuite @ shutdown hook manager", "org.apache.spark.util.UtilsSuite @ isInDirectory", "org.apache.spark.util.UtilsSuite @ circular buffer: if nothing was written to the buffer, display nothing", "org.apache.spark.util.UtilsSuite @ circular buffer: if the buffer isn't full, print only the contents written", 
"org.apache.spark.util.UtilsSuite @ circular buffer: data written == size of the buffer", "org.apache.spark.util.UtilsSuite @ circular buffer: multiple overflow", "org.apache.spark.util.UtilsSuite @ isDynamicAllocationEnabled", "org.apache.spark.util.UtilsSuite @ getDynamicAllocationInitialExecutors", "org.apache.spark.util.UtilsSuite @ Set Spark CallerContext", "org.apache.spark.util.UtilsSuite @ encodeFileNameToURIRawPath", "org.apache.spark.util.UtilsSuite @ decodeFileNameInURI", "org.apache.spark.util.UtilsSuite @ Kill process", "org.apache.spark.util.UtilsSuite @ chi square test of randomizeInPlace", "org.apache.spark.util.UtilsSuite @ redact sensitive information", "org.apache.spark.util.UtilsSuite @ redact sensitive information in command line args", "org.apache.spark.util.UtilsSuite @ redact sensitive information in sequence of key value pairs", "org.apache.spark.util.UtilsSuite @ tryWithSafeFinally", "org.apache.spark.util.UtilsSuite @ tryWithSafeFinallyAndFailureCallbacks", "org.apache.spark.util.UtilsSuite @ load extensions", "org.apache.spark.util.UtilsSuite @ check Kubernetes master URL", "org.apache.spark.util.UtilsSuite @ stringHalfWidth", "org.apache.spark.util.UtilsSuite @ trimExceptCRLF standalone", "org.apache.spark.util.UtilsSuite @ pathsToMetadata", "org.apache.spark.util.UtilsSuite @ checkHost supports both IPV4 and IPV6", "org.apache.spark.util.UtilsSuite @ checkHostPort support IPV6 and IPV4", "org.apache.spark.util.UtilsSuite @ parseHostPort support IPV6 and IPV4", "org.apache.spark.util.UtilsSuite @ executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is false", "org.apache.spark.util.UtilsSuite @ executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is true", "org.apache.spark.util.UtilsSuite @ executorMemoryOverhead when MEMORY_OFFHEAP_ENABLED is true, but MEMORY_OFFHEAP_SIZE not config scene", "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1", "org.apache.spark.util.VersionUtilsSuite @ Parse Spark major version"] \ No newline at end of file diff --git a/core/identify_param/runner.py b/core/identify_param/runner.py index 9fc34a83..aac92ef4 100644 --- a/core/identify_param/runner.py +++ b/core/identify_param/runner.py @@ -140,6 +140,22 @@ def persist_list(self, method_list, file_name): json_file = open("results/%s/logs/%s.json" % (self.module, file_name), "w") json.dump(method_list, json_file) json_file.close() + + def write_report(self, src_file, dst_file, method): + f_src = open(src_file, "r") + f_dst = open(dst_file, "w") + lines = f_src.readlines() + writed = False + test_start = False + method_name = method.split(" @ ")[1] + for line in lines: + if "- " + method_name in line: + test_start = True + print("- " + method_name) + if test_start and ("[CTEST][GET-PARAM]" in line or "[CTEST][SET-PARAM]" in line): + f_dst.write(line) + writed = True + return writed def run_individual_testmethod(self): all_test_methods = json.load(open("%s" % (self.run_list))) @@ -158,16 +174,28 @@ def run_individual_testmethod(self): for method in all_test_methods: print("==================================================================================") - assert method.count("#") == 1, "there should be only one #, but actually you have: " + method + if self.module in ["spark-core"]: + assert method.count("@") == 1, "there should be only one @, but actually you have: " + method + else: + assert method.count("#") == 1, "there should be only one 
#, but actually you have: " + method - method_out = open(out_dir + method + "-log.txt", "w+") + if self.module in ["spark-core"]: + log_file_name = out_dir + (method.split('.')[-1].replace("/", "_")) + "-log.txt" + else: + log_file_name = out_dir + method + "-log.txt" + method_out = open(log_file_name, "w+") method_report_path = report_dir + method + "-report.txt" start_time_for_this_method = time.time() if self.module == "alluxio-core": cmd = ["mvn", "surefire:test", "-Dtest=" + method, "-DfailIfNoTests=false"] + elif self.module == "spark-core": + cmd = ["mvn", "test", "-Dtest=none", "-Dsuites=" + method] else: cmd = ["mvn", "surefire:test", "-Dtest=" + method] - print ("mvn surefire:test -Dtest="+method) + if self.module == "spark-core": + print ("mvn test -Dsuites="+method) + else: + print ("mvn surefire:test -Dtest="+method) child = subprocess.Popen(cmd, stdout=method_out, stderr=method_out) child.wait() @@ -186,15 +214,22 @@ def run_individual_testmethod(self): self.failure_list.append(method) continue - class_name = method.split("#")[0] - suffix_filename_to_check = class_name + "-output.txt" - full_path = self.get_full_report_path(suffix_filename_to_check) - if full_path == "none": - print("no report for " + method) - self.no_report_list.append(method) + if self.module == "spark-core": + if self.write_report(log_file_name, method_report_path, method): + self.parse(open(method_report_path, "r").readlines(), method) + else: + print("no report for " + method) + self.no_report_list.append(method) else: - shutil.copy(full_path, method_report_path) - self.parse(open(full_path, "r").readlines(), method) + class_name = method.split("#")[0] + suffix_filename_to_check = class_name + "-output.txt" + full_path = self.get_full_report_path(suffix_filename_to_check) + if full_path == "none": + print("no report for " + method) + self.no_report_list.append(method) + else: + shutil.copy(full_path, method_report_path) + self.parse(open(full_path, "r").readlines(), method) shutil.rmtree(out_dir) shutil.rmtree(report_dir) diff --git a/core/patch/spark/logging.patch b/core/patch/spark/logging.patch new file mode 100644 index 00000000..788884ac --- /dev/null +++ b/core/patch/spark/logging.patch @@ -0,0 +1,98 @@ +From add1459f8e356b4dcd8c9f517b94dd6d495b7b9d Mon Sep 17 00:00:00 2001 +From: ZHLOLin +Date: Thu, 1 Dec 2022 21:38:30 -0600 +Subject: [PATCH] Update SparkConf.scala + +Enable ctest logging +--- + .../scala/org/apache/spark/SparkConf.scala | 25 ++++++++++++++++++- + 1 file changed, 24 insertions(+), 1 deletion(-) + +diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala +index 5f37a1abb1..d8794706a4 100644 +--- a/core/src/main/scala/org/apache/spark/SparkConf.scala ++++ b/core/src/main/scala/org/apache/spark/SparkConf.scala +@@ -70,6 +70,14 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + loadFromSystemProperties(false) + } + ++ // For CTest ++ private[spark] def getStackTrace(): String = { ++ var stackTrace = " " ++ for (e <- Thread.currentThread().getStackTrace()) ++ stackTrace = stackTrace.concat(e.getClassName() + "\t") ++ stackTrace ++ } ++ + private[spark] def loadFromSystemProperties(silent: Boolean): SparkConf = { + // Load any spark.* system properties + for ((key, value) <- Utils.getSystemProperties if key.startsWith("spark.")) { +@@ -83,7 +91,8 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + set(key, value, false) + } + +- private[spark] def 
set(key: String, value: String, silent: Boolean): SparkConf = { ++ private[spark] def set(key: String, value: String, silent: Boolean ++ , ctest: Boolean = true): SparkConf = { + if (key == null) { + throw new NullPointerException("null key") + } +@@ -94,6 +103,10 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + logDeprecationWarning(key) + } + settings.put(key, value) ++ if (ctest) { ++ // scalastyle:off println ++ Console.println("[CTEST][SET-PARAM] " + key + getStackTrace()) // CTest ++ } + this + } + +@@ -175,6 +188,8 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + + /** Set a parameter if it isn't already configured */ + def setIfMissing(key: String, value: String): SparkConf = { ++ // scalastyle:off println ++ Console.println("[CTEST][SET-PARAM] " + key + getStackTrace()) + if (settings.putIfAbsent(key, value) == null) { + logDeprecationWarning(key) + } +@@ -182,6 +197,8 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + } + + private[spark] def setIfMissing[T](entry: ConfigEntry[T], value: T): SparkConf = { ++ // scalastyle:off println ++ Console.println("[CTEST][SET-PARAM] " + entry.key + getStackTrace()) + if (settings.putIfAbsent(entry.key, entry.stringConverter(value)) == null) { + logDeprecationWarning(entry.key) + } +@@ -189,6 +206,8 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + } + + private[spark] def setIfMissing[T](entry: OptionalConfigEntry[T], value: T): SparkConf = { ++ // scalastyle:off println ++ Console.println("[CTEST][SET-PARAM] " + entry.key + getStackTrace()) + if (settings.putIfAbsent(entry.key, entry.rawStringConverter(value)) == null) { + logDeprecationWarning(entry.key) + } +@@ -258,6 +277,8 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + * - This will throw an exception is the config is not optional and the value is not set. 
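The SparkConf instrumentation in this patch is the whole interception mechanism for Spark: every get/set of a parameter prints a tagged line ("[CTEST][GET-PARAM] <key>", or "[CTEST][SET-PARAM] <key>" followed by a tab-separated stack of class names), and write_report() in identify_param/runner.py simply filters those lines out of the per-test Maven log produced by mvn test -Dtest=none -Dsuites="<Suite> @ <test name>". A minimal sketch of that filtering step, assuming only the tag format emitted by the Console.println calls in this patch (the log path is illustrative):

# Sketch: collect the parameters a single Spark test touched, from its Maven log.
def extract_ctest_params(log_path):
    gets, sets = set(), set()
    with open(log_path) as log:
        for line in log:
            if "[CTEST][GET-PARAM]" in line:
                gets.add(line.split("[CTEST][GET-PARAM]", 1)[1].split()[0])
            elif "[CTEST][SET-PARAM]" in line:
                # SET lines also carry a tab-separated stack trace after the key
                sets.add(line.split("[CTEST][SET-PARAM]", 1)[1].split()[0])
    return gets, sets

write_report() in runner.py does essentially this, but additionally waits until the ScalaTest output has reached the target test name ("- <test name>") before it starts copying lines into the report file.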
+ */ + private[spark] def get[T](entry: ConfigEntry[T]): T = { ++ // scalastyle:off println ++ Console.println("[CTEST][GET-PARAM] " + entry.key) // CTest + entry.readFrom(reader) + } + +@@ -385,6 +406,8 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria + + /** Get a parameter as an Option */ + def getOption(key: String): Option[String] = { ++ // scalastyle:off println ++ Console.println("[CTEST][GET-PARAM] " + key) // CTest + Option(settings.get(key)).orElse(getDeprecatedConfig(key, settings)) + } + +-- +2.25.1 + diff --git a/core/run_ctest/inject.py b/core/run_ctest/inject.py index 75f5b443..b2c29316 100644 --- a/core/run_ctest/inject.py +++ b/core/run_ctest/inject.py @@ -2,6 +2,7 @@ import sys import xml.etree.ElementTree as ET +import shutil sys.path.append("..") from ctest_const import * @@ -35,6 +36,28 @@ def inject_config(param_value_pairs): file.write(str.encode("\n\n")) file.write(ET.tostring(conf)) file.close() + elif project in [SPARK]: + for inject_path in INJECTION_PATH[project]: + back_up = inject_path + "/back_up.xml" + inject_path = inject_path + "/pom.xml" + shutil.copyfile(inject_path, back_up) + print(">>>>[ctest_core] injecting into file: {}".format(inject_path)) + tree = ET.parse(inject_path) + pom = tree.getroot() + namespace = pom.tag.split('{')[1].split('}')[0] + # for reading + namespace_mapping = {'mvnns': namespace} + # for writing: otherwise 'xmlns:ns0' will be used instead of the standard xml namespace 'xmlns' + ET.register_namespace('', namespace) + ns = "{http://maven.apache.org/POM/4.0.0}" + for child in pom.findall("%sbuild/%spluginManagement/%splugins/%splugin" % (ns, ns, ns, ns)): + gid = child.find("%sgroupId" % ns) + if gid.text == "org.scalatest": + child = child.find("%sconfiguration/%ssystemProperties" % (ns, ns)) + for p, v in param_value_pairs.items(): + sub = ET.SubElement(child, '%s%s' % (ns, p)) + sub.text = v + tree.write(inject_path, encoding='utf-8') else: sys.exit(">>>>[ctest_core] value injection for {} is not supported yet".format(project)) @@ -53,5 +76,10 @@ def clean_conf_file(project): file.write(str.encode("\n\n")) file.write(ET.tostring(conf)) file.close() + elif project in [SPARK]: + for inject_path in INJECTION_PATH[project]: + back_up = inject_path + "/back_up.xml" + inject_path = inject_path + "/pom.xml" + shutil.copyfile(back_up, inject_path) else: sys.exit(">>>>[ctest_core] value injection for {} is not supported yet".format(project)) diff --git a/core/run_ctest/parse_input.py b/core/run_ctest/parse_input.py index 5b4bedbf..1fad9a60 100644 --- a/core/run_ctest/parse_input.py +++ b/core/run_ctest/parse_input.py @@ -39,7 +39,7 @@ def load_default_conf(path): def parse_conf_file(path): """parse config file""" - if project in [HCOMMON, HDFS, HBASE]: + if project in [HCOMMON, HDFS, HBASE, SPARK]: return parse_conf_file_xml(path) else: # parsing for alluxio and zookeeper conf file format diff --git a/core/run_ctest/program_input.py b/core/run_ctest/program_input.py index 2b971f1c..d067172c 100644 --- a/core/run_ctest/program_input.py +++ b/core/run_ctest/program_input.py @@ -4,15 +4,15 @@ # run mode "run_mode": "run_ctest", # string # name of the project, i.e. 
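The SPARK branch added to inject.py takes a different route from the XML conf files used for the Hadoop-family projects: it backs up core/pom.xml and then writes each generated parameter value into the <systemProperties> section of the org.scalatest plugin entry, so the values reach the forked test JVM as system properties and are then picked up by the patched SparkConf.loadFromSystemProperties. A self-contained sketch of the same ElementTree manipulation on a toy POM fragment (the plugin layout is simplified and the parameter/value pair is made up):

# Toy version of the pom.xml injection: add one system property to the
# org.scalatest plugin's <systemProperties> block and keep the default xmlns.
import xml.etree.ElementTree as ET

POM = """<project xmlns="http://maven.apache.org/POM/4.0.0">
  <build><pluginManagement><plugins><plugin>
    <groupId>org.scalatest</groupId>
    <configuration><systemProperties/></configuration>
  </plugin></plugins></pluginManagement></build>
</project>"""

POM_NS = "http://maven.apache.org/POM/4.0.0"
ns = "{%s}" % POM_NS
ET.register_namespace("", POM_NS)  # write back a plain xmlns instead of an ns0: prefix
root = ET.fromstring(POM)
for plugin in root.findall("%sbuild/%spluginManagement/%splugins/%splugin" % (ns, ns, ns, ns)):
    if plugin.find("%sgroupId" % ns).text == "org.scalatest":
        props = plugin.find("%sconfiguration/%ssystemProperties" % (ns, ns))
        prop = ET.SubElement(props, ns + "spark.ui.killEnabled")  # hypothetical parameter
        prop.text = "false"
print(ET.tostring(root).decode())

Restoring the original pom.xml afterwards (clean_conf_file) is just the reverse copy from back_up.xml, as shown above.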
hadoop-common, hadoop-hdfs - "project": "hadoop-common", # string + "project": "spark-core", # string # path to param -> tests json mapping - "mapping_path": "../../data/ctest_mapping/opensource-hadoop-common.json", # string + "mapping_path": "../../data/ctest_mapping/opensource-spark-core.json", # string # input directory hosting configuration files to be test, target-project-format specific - "conf_file_dir": "sample-hadoop-common", # string + "conf_file_dir": "sample-spark-core", # string # display the terminal output live, without saving any results "display_mode": False, # bool # whether to use mvn test or mvn surefire:test - "use_surefire": False, # bool + "use_surefire": True, # bool # additional maven options to pass to `mvn surefire:test -Dtest=...` "maven_args": [], # list of strings, each element is an option # timeout on the mvn test command diff --git a/core/run_ctest/run_single_ctest.py b/core/run_ctest/run_single_ctest.py index 7d299e3a..6dc3373d 100755 --- a/core/run_ctest/run_single_ctest.py +++ b/core/run_ctest/run_single_ctest.py @@ -23,7 +23,7 @@ def main(argv): def test_conf_file(test_input, ctestname): params = test_input.keys() - associated_test_map = {p: [ctestname] for p in params if ctestname in mapping[p]} + associated_test_map = {p: [ctestname] for p in params if p in mapping[ctestname]} print(">>>>[ctest_core] # parameters associated with the run: {}".format(len(params))) tr = run_test_batch(test_input, associated_test_map) tup = tr.ran_tests_and_time.pop() diff --git a/core/run_ctest/run_test.py b/core/run_ctest/run_test.py index e156567f..0a92409d 100644 --- a/core/run_ctest/run_test.py +++ b/core/run_ctest/run_test.py @@ -30,11 +30,11 @@ def run_test_batch(param_values, associated_test_map): tested_params, tests = group inject_config({p: param_values[p] for p in tested_params}) print(">>>>[ctest_core] running group {} where {} params shares {} ctests".format(index, len(tested_params), len(tests))) - test_str = run_test_utils.join_test_string(tests) + test_str = run_test_utils.join_test_string(tests, project=project) os.chdir(testing_dir) print(">>>>[ctest_core] chdir to {}".format(testing_dir)) - cmd = run_test_utils.maven_cmd(test_str) + cmd = run_test_utils.maven_cmd(test_str, project=project) if display_mode: os.system(" ".join(cmd)) continue @@ -60,7 +60,7 @@ def run_test_batch(param_values, associated_test_map): print_output = run_test_utils.strip_ansi(stdout.decode("ascii", "ignore")) print(print_output) - test_by_cls = run_test_utils.group_test_by_cls(tests) + test_by_cls = run_test_utils.group_test_by_cls(tests, project=project) for clsname, methods in test_by_cls.items(): times, errors = parse_surefire(clsname, methods) for m in methods: diff --git a/core/run_ctest/run_test_utils.py b/core/run_ctest/run_test_utils.py index 12dfa13c..5d45e47e 100644 --- a/core/run_ctest/run_test_utils.py +++ b/core/run_ctest/run_test_utils.py @@ -15,11 +15,16 @@ def __init__(self, ran_tests_and_time=set(), failed_tests=set()): self.ran_tests_and_time = ran_tests_and_time -def maven_cmd(test, add_time=False): +def maven_cmd(test, add_time=False, project=None): # surefire:test reuses test build from last compilation # if you modified the test and want to rerun it, you must use `mvn test` - test_mode = "surefire:test" if use_surefire else "test" - cmd = ["mvn", test_mode, "-Dtest={}".format(test)] + maven_args + cmd = None + if project == SPARK: + test_mode = "scalatest:test" if use_surefire else "test" + cmd = ["mvn", test_mode, "-Dtest=none", "-Dsuites=" + test] + 
maven_args + else: + test_mode = "surefire:test" if use_surefire else "test" + cmd = ["mvn", test_mode, "-Dtest={}".format(test)] + maven_args if add_time: cmd = ["time"] + cmd print(">>>>[ctest_core] command: " + " ".join(cmd)) @@ -30,20 +35,30 @@ def strip_ansi(s): return ansi_escape.sub('', s) -def join_test_string(tests): - test_by_cls = group_test_by_cls(tests) +def join_test_string(tests, project=None): + test_by_cls = group_test_by_cls(tests, project) ret = "" + split = None + if project == SPARK: + split = " @ " + else: + split = "#" for clsname, methods in test_by_cls.items(): ret += clsname - ret += "#" + ret += split ret += "+".join(list(methods)) ret += "," return ret -def group_test_by_cls(tests): +def group_test_by_cls(tests, project=None): d = {} + split = None + if project == SPARK: + split = " @ " + else: + split = "#" for t in tests: - clsname, method = t.split("#") + clsname, method = t.split(split) if clsname not in d: d[clsname] = set() d[clsname].add(method) diff --git a/core/setup_ubuntu.sh b/core/setup_ubuntu.sh index 4defe7fe..647a406d 100755 --- a/core/setup_ubuntu.sh +++ b/core/setup_ubuntu.sh @@ -2,10 +2,17 @@ # set up env for Linux ubuntu sudo apt-get install openjdk-8-jdk -sudo apt-get install maven +sudo tar xzf apache-maven-3.8.6-bin.tar.gz -C /opt +sudo ln -s /opt/apache-maven-3.8.6 /opt/maven +sudo apt-get install scala +# sudo nano /etc/profile.d/maven.sh +# Add the following lines to the maven.sh file: +# export JAVA_HOME=/usr/lib/jvm/default-java +# export M2_HOME=/opt/maven +# export MAVEN_HOME=/opt/maven +# export PATH=${M2_HOME}/bin:${PATH} sudo apt-get install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev - # install protobuf 2.5 curdir=$PWD cd /usr/local/src/ diff --git a/data/ctest_mapping/opensource-spark-core.json b/data/ctest_mapping/opensource-spark-core.json new file mode 100644 index 00000000..e5a44e5d --- /dev/null +++ b/data/ctest_mapping/opensource-spark-core.json @@ -0,0 +1,155816 @@ +{ + "spark.ui.killEnabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - 
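With the run_test_utils.py changes above, Spark test identifiers keep the "<SuiteClass> @ <test name>" form end to end: group_test_by_cls() splits on " @ " instead of "#", join_test_string() rebuilds one "<SuiteClass> @ <name1>+<name2>," chunk per suite, and maven_cmd() hands that string to Maven via -Dsuites (with -Dtest=none so surefire itself runs nothing). A small sketch of the string this produces, using two test names that appear in the mapping below:

# Sketch of the -Dsuites string built for spark-core ctests (per-suite order is a
# set in the real helper; sorted here only to make the printed output stable).
tests = [
    "org.apache.spark.rdd.RDDSuite @ basic operations",
    "org.apache.spark.rdd.RDDSuite @ serialization",
]
by_cls = {}
for t in tests:
    cls, method = t.split(" @ ")
    by_cls.setdefault(cls, set()).add(method)
test_str = ""
for cls, methods in by_cls.items():
    test_str += cls + " @ " + "+".join(sorted(methods)) + ","
print(test_str)  # org.apache.spark.rdd.RDDSuite @ basic operations+serialization,
# run_test.py then invokes: mvn scalatest:test -Dtest=none -Dsuites="<test_str>"
# (or "mvn test ..." when use_surefire is False in program_input.py)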
Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input 
metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD 
if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + 
"org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw 
exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + 
"org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with 
reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block 
replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block 
replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.driver.log.persistToDfs.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more 
slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores 
per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before 
checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ 
SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + 
"org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() 
call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 
default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: 
FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized 
with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are 
less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: 
Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within 
EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service 
disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one 
SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config 
not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should 
display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.driver.port": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total 
under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ 
CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite 
@ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey 
without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite 
@ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + 
"org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ 
serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite 
@ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + 
"org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup 
when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: 
Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is 
cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed 
serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference 
tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases 
resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
[SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster 
mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 
off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of 
multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if 
a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", 
+ "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + 
"org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty 
partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.mapOutput.minSizeForBroadcast": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor 
when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with 
shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + 
"org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + 
"org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + 
"org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if 
spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ min broadcast size exceeds max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ equally divide map statistics tasks", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records 
written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support 
invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition 
serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of 
transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end 
event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file 
property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet 
can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of 
results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being 
considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without 
resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 
configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ 
write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + 
"org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before 
bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download 
dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration 
(enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite 
@ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization 
and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf 
are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation 
in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ 
clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with 
failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.local.dir": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage 
with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with 
spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ 
unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", 
+ "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal 
accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in 
different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", 
+ "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + 
"org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: 
customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall 
abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with 
shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + 
"org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for 
speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", 
+ "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + 
"org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on 
cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ 
getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU 
storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked 
blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockManagerSuite @ basic block creation", + "org.apache.spark.storage.DiskBlockManagerSuite @ enumerating blocks", + "org.apache.spark.storage.DiskBlockManagerSuite @ SPARK-22227: non-block files are skipped", + "org.apache.spark.storage.DiskBlockManagerSuite @ should still create merge directories if one already exists under a local dir", + "org.apache.spark.storage.DiskBlockManagerSuite @ Encode merged directory name and attemptId in shuffleManager field", + "org.apache.spark.storage.DiskStoreSuite @ reads of memory-mapped and non memory-mapped files are equivalent", + "org.apache.spark.storage.DiskStoreSuite @ block size tracking", + "org.apache.spark.storage.DiskStoreSuite @ blocks larger than 2gb", + 
"org.apache.spark.storage.DiskStoreSuite @ block data encryption", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + 
"org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill 
for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.speculation.interval": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + 
"org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ 
cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + 
"org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: 
shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from 
overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's 
position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed 
distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ 
repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + 
"org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared 
stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the 
corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ 
excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer 
tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not 
reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be 
scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + 
"org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ 
addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + 
"org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite 
@ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster 
with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.rdd.compress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not 
cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that 
fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a 
task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + 
"org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + 
"org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should 
work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + 
"org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with 
no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + 
"org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + 
"org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range 
partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] 
Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage 
until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages 
when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered 
after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws 
NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners 
after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of 
HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the 
stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple 
simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add 
jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ 
SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite 
@ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + 
"org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local 
properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic 
nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.compress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage 
with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable 
checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + 
"org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + 
"org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing 
from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler 
is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a 
nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + 
"org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect 
lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default 
partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ 
multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local 
checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only 
accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor 
added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling 
with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: 
GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", 
+ "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + 
"org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up 
files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + 
"org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based 
shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a 
location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off 
updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a 
closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot 
call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + 
"org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.requestHeaderSize": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in 
distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should behaves the same as REST API when filtering applications", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop 
while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + 
"org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length 
partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", 
+ "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + 
"org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + 
"org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or 
TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo 
with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + 
"org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", 
+ "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: 
ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted 
in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ jetty selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty with https selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ jetty with https binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ add and remove handlers with custom user filter", + "org.apache.spark.ui.UISuite @ SPARK-32467: Avoid encoding URL twice on https redirect", + "org.apache.spark.ui.UISuite @ http -> https redirect applies to all URIs", + "org.apache.spark.ui.UISuite @ specify both http and https ports separately", + "org.apache.spark.ui.UISuite @ redirect with proxy server support", + "org.apache.spark.ui.UISuite @ SPARK-34449: Jetty 9.4.35.v20201120 and later no longer return status code 302 and handle internally when request URL ends with a context path without trailing '/'", + "org.apache.spark.ui.UISuite @ SPARK-34449: default thread pool size of different jetty servers", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting 
contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.logConf": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a 
local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", 
+ "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no 
execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + 
"org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no 
files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + 
"org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock 
release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling 
for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow 
ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + 
"org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + 
"org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset 
timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable 
output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + 
"org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero 
sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + 
"org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency 
jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ 
live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.broadcast.compress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast 
variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + 
"org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with 
aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + 
"org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ 
no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with 
new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + 
"org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + 
"org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: 
customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall 
abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with 
shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + 
"org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for 
speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + 
"org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after 
errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: 
shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and 
listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is 
received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + 
"org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + 
"org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ 
case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event 
timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + 
"org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.scheduler.listenerbus.eventqueue.appStatus.capacity": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in 
distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + 
"org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", 
+ "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed 
distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ 
repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + 
"org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry 
all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in 
listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ add and remove listeners to/from LiveListenerBus queues", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ event queue size can be configured through spark conf", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any 
resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable 
output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + 
"org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero 
sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + 
"org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support 
Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ 
SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but 
putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + 
"org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill 
for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.cleaner.referenceTracking.blocking.shuffle": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and 
driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + 
"org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero 
exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", 
+ "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite 
@ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for 
skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed 
distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ 
repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + 
"org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry 
all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in 
listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = 
false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the 
decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or 
TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo 
with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ 
addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + 
"org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x 
replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ 
toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in 
local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.reverseProxy": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite 
@ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.DistributedSuite @ task throws not serializable 
exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + 
"org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ 
internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no 
files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + 
"org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock 
release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling 
for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow 
ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + 
"org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + 
"org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset 
timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark 
taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt 
task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test 
SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + 
"org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ 
zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", 
+ "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + 
"org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + 
"org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ 
unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.push.minShuffleSizeToWait": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing 
environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache 
fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save 
Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite 
@ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + 
"org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom 
ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + 
"org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite 
@ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ 
only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via 
spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule 
more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and 
decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with 
nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different 
thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + 
"org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based 
RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is 
disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + 
"org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + 
"org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of 
intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.storage.decommission.shuffleBlocks.maxDiskSize": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier 
RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite 
@ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 
ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + 
"org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + 
"org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready 
yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ 
defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + 
"org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + 
"org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location 
preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback 
(checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing 
the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to 
driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result 
lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: 
GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + 
"org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in 
multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ commit shuffle files multiple times", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ SPARK-33198 getMigrationBlocks should not fail at missing files", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ getMergedBlockData should return expected FileSegmentManagedBuffer list", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ getMergedBlockMeta should return expected MergedBlockMeta", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized 
path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any 
exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should 
be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x 
replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test 
putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + 
"org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.memoryOverhead": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local 
cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + 
"org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles 
initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before 
checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ 
SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + 
"org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + 
"org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", 
+ "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures 
from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map 
status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + 
"org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is 
pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", 
+ "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and 
decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with 
nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different 
thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + 
"org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based 
RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory 
should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block 
replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + 
"org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.spill.compress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ 
decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle 
FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + 
"org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + 
"org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + 
"org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + 
"org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range 
of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are 
registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after 
failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of 
HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the 
stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple 
simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add 
jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ 
SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite 
@ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + 
"org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local 
properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic 
nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.push.merge.finalizeThreads": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ 
submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing 
partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + 
"org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + 
"org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing 
from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler 
is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a 
nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + 
"org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect 
lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default 
partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ 
multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local 
checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only 
accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor 
added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as 
success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with 
different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool 
implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel 
zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block 
replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + 
"org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo 
ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.kryo.referenceTracking": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more 
slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + 
"org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ 
merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", 
+ "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite 
@ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching 
multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact 
other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as 
dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + 
"org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ 
SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: 
getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + 
"org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ 
SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a 
decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are 
deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + 
"org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ 
Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes 
with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + 
"org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for 
external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.shuffle.service.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite 
@ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic 
allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are 
hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job 
is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure 
cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ 
spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote 
fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when 
defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + 
"org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined 
ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + 
"org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures 
don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end 
event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after 
shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ basic executor timeout", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track tasks running on executor", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ use appropriate time out depending on whether blocks are stored", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ keeps track of stored blocks for each rdd and split", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ handle timeouts correctly with multiple executors", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-38019: timedOutExecutors should be deterministic", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-27677: don't track blocks stored on disk when using shuffle service", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track executors pending for removal", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28839: Avoids NPE in context cleaner when shuffle service is on", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout calculation", + 
"org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates 
a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: 
Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task 
is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: 
don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", 
+ "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + 
"org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle 
checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and 
listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", 
+ "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 
off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockManagerSuite @ basic block creation", + "org.apache.spark.storage.DiskBlockManagerSuite @ enumerating blocks", + "org.apache.spark.storage.DiskBlockManagerSuite @ SPARK-22227: non-block files are skipped", + "org.apache.spark.storage.DiskBlockManagerSuite @ should still create merge directories if one already exists under a local dir", + "org.apache.spark.storage.DiskBlockManagerSuite @ Encode merged directory name and attemptId in shuffleManager field", + "org.apache.spark.storage.DiskStoreSuite @ reads of memory-mapped and non memory-mapped files are equivalent", + "org.apache.spark.storage.DiskStoreSuite @ block size tracking", + "org.apache.spark.storage.DiskStoreSuite @ blocks larger than 2gb", + "org.apache.spark.storage.DiskStoreSuite @ block data encryption", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + 
"org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.LocalDirsSuite @ SPARK_LOCAL_DIRS override also affects driver", + "org.apache.spark.storage.LocalDirsSuite @ Utils.getLocalDir() throws an exception if any temporary directory cannot be retrieved", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + 
"org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + 
"org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.shuffle.useOldFetchProtocol": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ 
decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted 
blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + 
"org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job 
group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ 
SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", 
+ "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple 
RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + 
"org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number 
of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + 
"org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with 
fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted 
result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property 
points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send 
Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER 
for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add 
directories by default", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of 
block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite 
@ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested 
closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with 
kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.speculation.minTaskRuntime": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + 
"org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ 
CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + 
"org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ 
fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", 
+ "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + 
"org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + 
"org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple 
job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures 
don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ 
executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + 
"org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks 
should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + 
"org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using 
mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable 
class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + 
"org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + 
"org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", 
+ "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no 
default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.serializer": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", 
+ "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite 
@ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job 
failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node 
failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are 
not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + 
"org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep 
running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + 
"org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs 
with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array 
with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all 
the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency 
with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on 
stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + 
"org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task 
exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was 
decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: 
no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + 
"org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ 
addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test 
transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations 
can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + 
"org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels 
(encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block 
if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display 
useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.storage.localDiskByExecutors.cacheSize": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier 
ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD 
[reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor 
twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on 
mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + 
"org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ 
merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread 
while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative 
message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if 
all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + 
"org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR 
Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ 
SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is 
provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based 
shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext 
after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create 
SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block 
manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage 
(encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + 
"org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.storage.decommission.fallbackStorage.path": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ 
decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle 
FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + 
"org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + 
"org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + 
"org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + 
"org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range 
of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are 
registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after 
failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization 
and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of 
TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ 
creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling 
multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store 
application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no migrations configured", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block 
decom manager with only shuffle files time moves forward", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ SPARK-40168: block decom manager handles shuffle file not found", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-34142: fallback storage API - cleanUp", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job 
progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with 
compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting 
with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.kryo.registrator": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should 
consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - 
Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + 
"org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin 
initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining 
iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with 
known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + 
"org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than 
map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: 
Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is 
cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + 
"org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works 
correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be 
removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + 
"org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default 
throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice 
with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case 
insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same 
time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + 
"org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels 
(encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block 
if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through 
putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + 
"org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ 
SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill 
File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + 
"org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.executor.memory": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + 
"org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + 
"org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles 
initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ insufficient executor memory", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ 
input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ 
async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint 
blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ 
SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + 
"org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + 
"org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", 
+ "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures 
from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map 
status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + 
"org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is 
pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", 
+ "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and 
decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with 
nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different 
thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + 
"org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based 
RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory 
should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block 
replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + 
"org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.network.maxRemoteBlockSizeFetchToMem": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ 
decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated 
resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + 
"org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + 
"org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should 
work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + 
"org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with 
no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + 
"org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + 
"org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range 
partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] 
Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage 
until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages 
when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered 
after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws 
NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners 
after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization 
and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of 
TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ 
getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + 
"org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + 
"org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + 
"org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ 
SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from 
remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer 
class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.push.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor 
when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with 
shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + 
"org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching 
disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the 
empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ min broadcast size exceeds max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ equally divide map statistics tasks", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", 
+ "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite 
@ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + 
"org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions 
exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of 
non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure 
should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested 
Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: 
iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not 
be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an 
executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw 
proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle 
reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", 
+ "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode 
under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block 
managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote 
storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + 
"org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockManagerSuite @ basic block creation", + 
"org.apache.spark.storage.DiskBlockManagerSuite @ enumerating blocks", + "org.apache.spark.storage.DiskBlockManagerSuite @ SPARK-22227: non-block files are skipped", + "org.apache.spark.storage.DiskBlockManagerSuite @ should still create merge directories if one already exists under a local dir", + "org.apache.spark.storage.DiskBlockManagerSuite @ Encode merged directory name and attemptId in shuffleManager field", + "org.apache.spark.storage.DiskStoreSuite @ reads of memory-mapped and non memory-mapped files are equivalent", + "org.apache.spark.storage.DiskStoreSuite @ block size tracking", + "org.apache.spark.storage.DiskStoreSuite @ blocks larger than 2gb", + "org.apache.spark.storage.DiskStoreSuite @ block data encryption", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + 
"org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting 
contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.memory.fraction": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ 
SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD 
[reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + 
"org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ single task requesting on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks requesting full on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks cannot grow past 1 / N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks can block to get at least 1 / 2N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-35486: memory freed by self-spilling is taken by another task", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ TaskMemoryManager.cleanUpAllAllocatedMemory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks should not be granted a negative amount of execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ off-heap execution allocations cannot exceed limit", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic execution memory", + 
"org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic storage memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution evicts storage", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution memory requests smaller than free memory should evict storage (SPARK-12165)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ storage does not evict execution", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ small heap", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ insufficient executor memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution can evict cached blocks when there are multiple active tasks (SPARK-12155)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-15260: atomically resize memory pools", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ not enough free memory in the storage pool --OFF_HEAP", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + 
"org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe 
with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + 
"org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + 
"org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / 
when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt 
don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs 
when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not 
re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task 
exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could 
fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ 
kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one 
task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ 
SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same 
file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + 
"org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ 
SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should 
have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.task.reaper.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per 
executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + 
"org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile 
(compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + 
"org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop 
API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", 
+ "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after 
checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one 
RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ 
getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure 
and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages 
during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from 
different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if 
result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus 
limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be 
used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data 
structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + 
"org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested 
closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with 
kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.storage.replication.proactive": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + 
"org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ 
CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + 
"org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ 
fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", 
+ "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + 
"org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + 
"org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple 
job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an 
overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with 
shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + 
"org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for 
speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", 
+ "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + 
"org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + 
"org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + 
"org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: 
Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a 
decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: 
Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should 
be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.filters": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", 
+ "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should behaves the same as REST API when filtering applications", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage 
attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for 
app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ 
Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover 
from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than 
bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + 
"org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of 
partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + 
"org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", 
+ "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly 
sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as 
expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for 
specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo 
with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ 
metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + 
"org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test 
block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + 
"org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ add and remove handlers with custom user filter", + "org.apache.spark.ui.UISuite @ SPARK-32467: Avoid encoding URL twice on https redirect", + "org.apache.spark.ui.UISuite @ http -> https redirect applies to all URIs", + "org.apache.spark.ui.UISuite @ redirect with proxy server support", + "org.apache.spark.ui.UISuite @ SPARK-34449: Jetty 9.4.35.v20201120 and later no longer return status code 302 and handle internally when request URL ends with a context path without trailing '/'", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + 
"org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.timeline.jobs.maximum": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite 
@ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression 
settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new 
CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", 
+ "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + 
"org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + 
"org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro 
schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with 
some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + 
"org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + 
"org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or 
partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.reduceLocality.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using 
TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression 
settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream 
caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local 
mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ min broadcast size exceeds max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ equally divide map statistics tasks", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + 
"org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier 
mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + 
"org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not 
taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on 
the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous 
indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle 
merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in 
OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + 
"org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in 
when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars 
shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization 
buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and 
KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + 
"org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + 
"org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + 
"org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is 
received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of 
blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk 
(encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is 
occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last 
minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is 
exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to 
dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.master": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + 
"org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.security.HadoopFSDelegationTokenProviderSuite @ hadoopFSsToAccess should return defaultFS even if not configured", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ get a Spark configuration from arguments", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ 
simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors 
multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without 
kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before 
checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ 
SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + 
"org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + 
"org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor cpus", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no executor cores", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures 
don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end 
event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after 
shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + 
"org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active 
taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + 
"org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.SecurityManagerSuite @ missing secret authentication key", + "org.apache.spark.SecurityManagerSuite @ secret file must be defined in both driver and executor", + "org.apache.spark.SecurityManagerSuite @ master yarn cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ master local cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ master local[*] cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ master mesos://localhost:8080 cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'yarn'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local[*]'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'k8s://127.0.0.1'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'k8s://127.0.1.1'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'invalid'", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + 
"org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + 
"org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + 
"org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite 
@ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive 
use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 
off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded 
SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + 
"org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isDynamicAllocationEnabled", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.executor.metrics.fileSystemSchemes": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain 
PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + 
"org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte 
array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + 
"org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs 
with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array 
with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: 
isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: 
Support remote scheduler pool file", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = 
false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the 
decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen 
schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + 
"org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ 
SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log 
level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but 
putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should 
handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.network.timeout": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains 
PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite 
@ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of 
finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with 
a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ 
failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async 
action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master 
register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", 
+ "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + 
"org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested 
RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call 
receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not 
the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", 
+ "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map 
outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it 
exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should 
fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + 
"org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + 
"org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + 
"org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 
replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerMasterSuite @ SPARK-31422: getMemoryStatus should not fail after BlockManagerMaster stops", + "org.apache.spark.storage.BlockManagerMasterSuite @ SPARK-31422: getStorageStatus should not fail after BlockManagerMaster stops", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 
replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + 
"org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted 
from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + 
"org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ 
unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.retainedJobs": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task 
summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse 
application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage 
task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + 
"org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + 
"org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input 
metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async 
failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks 
exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ 
SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + 
"org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() 
call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 
default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: 
FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized 
with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are 
less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: 
Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within 
EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service 
disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one 
SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow 
to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total 
tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion 
time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / 
tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with 
java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.proxyRedirectUri": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures 
lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should behaves the same as REST API when filtering applications", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json 
details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download 
one log for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite 
@ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", 
+ "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ 
create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions 
without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to 
discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + 
"org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with 
different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should 
have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ jetty selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty with https selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ jetty with https binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ add and remove handlers with custom user filter", + "org.apache.spark.ui.UISuite @ SPARK-32467: Avoid encoding URL twice on https redirect", + "org.apache.spark.ui.UISuite @ http -> https redirect applies to all URIs", + "org.apache.spark.ui.UISuite @ specify both http and https ports separately", + "org.apache.spark.ui.UISuite @ redirect with proxy server support", + "org.apache.spark.ui.UISuite @ SPARK-34449: Jetty 9.4.35.v20201120 and later no longer return status code 302 and handle internally when request URL ends with a context path without trailing '/'", + "org.apache.spark.ui.UISuite @ SPARK-34449: default thread pool size of different jetty servers", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external 
aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.io.backLog": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ 
SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + 
"org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + 
broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", 
+ "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple 
simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + 
"org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", 
+ "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ 
Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ 
SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The 
JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + 
"org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + 
"org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 
default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: 
FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized 
with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can 
be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is 
provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based 
shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext 
after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite 
@ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + 
"org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null 
keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.app.name": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting 
TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map 
output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + 
"org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node 
and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite 
@ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock 
while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ 
SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be 
cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark 
exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + 
"org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid 
spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task 
exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + 
"org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + 
"org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite 
@ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains 
spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI 
-- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should 
respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected 
dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.rpc.io.backLog": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting 
TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + 
"org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in 
shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + 
"org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk 
corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory 
(old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - 
simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success 
handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite 
@ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + 
"org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with 
different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure 
whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO 
scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is 
handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to 
calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service 
disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one 
SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow 
to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory 
storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ 
SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ 
toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in 
local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.extraJavaOptions": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains 
PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ 
decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle 
FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + 
"org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + 
"org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + 
"org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + 
"org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range 
of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are 
registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after 
failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization 
and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of 
TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ 
creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for 
newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill 
missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite 
@ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster 
with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.liveUpdate.period": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not 
cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that 
fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a 
task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + 
"org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + 
"org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should 
work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + 
"org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with 
no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + 
"org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + 
"org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range 
partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] 
Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage 
until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages 
when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered 
after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws 
NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners 
after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization 
and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of 
TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ 
getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + 
"org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + 
"org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + 
"org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive 
block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button 
display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.retainedTasks": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", 
+ "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + 
"org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated 
executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + 
"org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ 
toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + 
"org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in 
order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ 
advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + 
"org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending 
with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ 
compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map 
stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only 
direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + 
"org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt 
belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no 
decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ 
kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of 
the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple 
simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add 
jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ 
SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should 
fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful 
tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + 
"org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty 
partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.task.cpus": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor 
when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with 
shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + 
"org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = 
true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", 
+ "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed 
distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ 
repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + 
"org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor cpus", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + 
"org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup 
when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: 
Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is 
cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed 
serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference 
tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases 
resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
[SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster 
mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", 
+ "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ 
proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name 
unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo 
ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.kryoserializer.buffer": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more 
slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + 
"org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ 
merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", 
+ "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite 
@ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching 
multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact 
other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as 
dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + 
"org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ 
SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: 
getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + 
"org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ 
SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a 
decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are 
deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + 
"org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ 
Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes 
with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + 
"org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for 
external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.kryo.classesToRegister": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ 
Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + 
"org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple 
simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + 
"org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", 
+ "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite 
@ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + 
"org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested 
RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite 
@ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce 
tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should 
build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always 
schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + 
"org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is 
enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating 
SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite 
@ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + 
"org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as 
network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without 
peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test 
putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks 
in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + 
"org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named 
functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with 
java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.driver.bindAddress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier 
ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + 
"org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite 
@ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory 
leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ 
spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + 
"org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is 
roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports 
map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + 
"org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly 
sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
[SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle 
fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using 
old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", 
+ "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when 
enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls 
TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches 
previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + 
"org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized 
path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any 
exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should 
be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x 
replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + 
"org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 
@ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.push.minCompletedPushRatio": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ 
submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing 
partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + 
"org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + 
"org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing 
from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler 
is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a 
nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + 
"org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect 
lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default 
partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ 
multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local 
checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only 
accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor 
added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as 
success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with 
different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool 
implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel 
zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block 
replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + 
"org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo 
ser",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory",
+ "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter",
+ "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs",
+ "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode",
+ "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting",
+ "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing"
+ ],
+ "spark.ui.timeline.executors.maximum": [
+ "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)",
+ "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)",
+ "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI",
+ "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information",
+ "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json",
+ "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)",
+ "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)",
+ "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json",
+ "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json",
+ "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list 
json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one 
stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete 
apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ 
test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema 
fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + 
"org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", 
+ "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.cores": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a 
barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic 
checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + 
"org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + 
"org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop 
API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() 
throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + 
"org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + 
"org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort 
descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor cpus", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 
default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: 
FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized 
with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are 
less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: 
Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within 
EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service 
disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one 
SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config 
not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should 
display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.jars": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under 
local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + 
"org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves config paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ support glob path", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-27575: yarn confs should merge new value with existing value", + "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties", + "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", + "org.apache.spark.deploy.SparkSubmitSuite @ handles natural line delimiters in --properties-file and --conf uniformly", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + 
"org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ 
fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", 
+ "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + 
"org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + 
"org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple 
job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an 
overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with 
shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + 
"org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for 
speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", 
+ "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + 
"org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + 
"org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file 
path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ 
SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block 
replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + 
"org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during 
iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.timelineEnabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w 
shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the 
last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage 
task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with 
mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no 
partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ 
RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", 
+ "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill 
button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.scheduler.listenerbus.eventqueue.executorManagement.capacity": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier 
ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ 
add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ 
subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite 
@ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", 
+ "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in 
saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at 
least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + 
"org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single 
task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle 
service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ 
FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite 
@ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 
4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task 
is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: 
don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", 
+ "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + 
"org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle 
checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and 
listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", 
+ "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 
off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET 
response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.eventLog.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast 
variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + 
"org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for 
shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", 
+ "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal 
accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in 
different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", 
+ "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with 
finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can 
result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure 
child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + 
"org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time 
if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire 
task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler 
works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite 
@ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + 
"org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ 
kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java 
non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + 
"org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + 
"org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: 
Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite 
@ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify 
webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], 
+ "spark.kryo.registrationRequired": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + 
"org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + 
"org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop 
API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() 
throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input 
metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", 
+ "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with 
finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can 
result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure 
child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ 
SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + 
"org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it 
accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other 
attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test 
SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 
configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 
configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - 
transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes 
through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + 
"org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + 
"org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() 
across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but 
putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes 
from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + 
"org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to 
register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than 
once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties 
in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested 
non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.scheduler.barrier.maxConcurrentTasksCheck.interval": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ 
submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ 
basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + 
"org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + 
"org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in 
a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + 
"org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both 
Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + 
"org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation 
- caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup 
standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ 
sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single 
plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure 
and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages 
during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits 
are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect 
only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor 
added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as 
success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with 
different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool 
implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel 
zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block 
replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + 
"org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo 
ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.timeline.tasks.maximum": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one 
stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete 
apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ 
test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema 
fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + 
"org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", 
+ "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.scheduler.listenerbus.eventqueue.capacity": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable 
checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete 
apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + 
"org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality 
preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ 
failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async 
action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes 
fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should 
fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage 
truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup 
standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ 
sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single 
plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure 
and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages 
during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation and shutdown of LiveListenerBus", + "org.apache.spark.scheduler.SparkListenerSuite @ bus.stop() waits for the event queue to completely drain", + "org.apache.spark.scheduler.SparkListenerSuite @ metrics for dropped listener events", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ SparkListener moves on if a listener throws an exception", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ add and remove listeners 
to/from LiveListenerBus queues", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ event queue size can be configured through spark conf", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset 
time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for 
entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts 
succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ 
asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", 
+ "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different 
compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar 
can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple 
transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job 
ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi 
stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + 
"org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty 
data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.retainedDeadExecutors": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task 
summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse 
application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage 
task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + 
"org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + 
"org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input 
metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async 
failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks 
exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ 
SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + 
"org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() 
call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 
default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: 
FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized 
with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are 
less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: 
Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within 
EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service 
disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one 
SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow 
to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total 
tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion 
time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / 
tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with 
java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.kryo.unsafe": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of 
resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", 
+ "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - 
Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + 
"org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin 
initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining 
iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with 
known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + 
"org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than 
map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: 
Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is 
cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + 
"org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works 
correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be 
removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + 
"org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default 
throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice 
with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case 
insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same 
time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + 
"org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels 
(encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block 
if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through 
putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + 
"org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ 
SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill 
File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + 
"org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.storage.unrollMemoryThreshold": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed 
mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + 
"org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", 
+ "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed 
distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ 
repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + 
"org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry 
all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in 
listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = 
false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and 
no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when 
one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with 
collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract 
mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle 
with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in 
local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 
manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + 
"org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer 
active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be 
reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.cleaner.referenceTracking": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic 
allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - 
snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement 
(SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining 
iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with 
known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + 
"org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than 
map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: 
Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is 
cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't 
schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed 
serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions 
on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external 
shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite 
@ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ 
local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is 
set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block 
replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 
mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill 
stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.storage.memoryMapThreshold": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local 
cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + 
"org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution 
of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + 
"org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no 
files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + 
"org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock 
release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling 
for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow 
ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + 
"org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + 
"org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset 
timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable 
output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ getMergedBlockMeta should return expected MergedBlockMeta", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + 
"org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + 
"org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + 
"org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + 
"org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached 
blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskStoreSuite @ reads of memory-mapped and non memory-mapped files are equivalent", + "org.apache.spark.storage.DiskStoreSuite @ block size tracking", + "org.apache.spark.storage.DiskStoreSuite @ blocks larger than 2gb", + "org.apache.spark.storage.DiskStoreSuite @ block data encryption", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control 
kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ 
ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.liveUpdate.minFlushPeriod": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local 
mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", 
+ "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + 
"org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated 
executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + 
"org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ 
toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + 
"org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in 
order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ 
advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + 
"org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending 
with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ 
compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map 
stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only 
direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + 
"org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt 
belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no 
decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ 
kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of 
the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple 
simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add 
jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ 
SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should 
fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful 
tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + 
"org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty 
partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.stage.maxConsecutiveAttempts": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor 
when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with 
shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + 
"org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + 
"org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + 
"org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if 
spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should 
run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + 
"org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe 
exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + 
"org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly 
sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
[SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle 
fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using 
old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", 
+ "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when 
enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls 
TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable 
locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 
context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + 
"org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ 
getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + 
"org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", 
+ "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ 
effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.metrics.pollingInterval": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: 
shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + 
"org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + 
"org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is 
roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports 
map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + 
"org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly 
sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted 
attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite 
@ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + 
"org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is 
zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and 
deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test 
RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ 
SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ 
getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + 
"org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill 
stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.driver.extraJavaOptions": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ 
ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in 
ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ 
text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from 
unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", 
+ "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite 
@ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching 
multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact 
other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as 
dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly 
in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test 
RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + 
"org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", 
+ "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ 
effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.files": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a 
barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ 
ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + 
"org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves config paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ support glob path", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-27575: yarn confs should merge new value with existing value", + "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties", + "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", + "org.apache.spark.deploy.SparkSubmitSuite @ handles natural line delimiters in --properties-file and --conf uniformly", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - 
Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + 
"org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin 
initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite 
@ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + 
"org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested 
RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite 
@ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce 
tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should 
build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always 
schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised 
consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex 
transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + 
"org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without 
compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + 
"org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = 
true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when 
block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is 
correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with 
many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.driver.maxResultSize": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in 
distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + 
"org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + 
"org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ 
spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ 
interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + 
"org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ 
large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ 
create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an 
empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ 
get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs 
list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: 
Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + 
"org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't 
schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks 
running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default 
values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service 
disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext 
after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + 
"org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ 
parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable 
closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.speculation": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing 
TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable 
exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + 
"org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ 
internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no 
files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + 
"org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock 
release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling 
for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to 
listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include 
call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer 
executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support 
remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", 
+ "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for 
speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", 
+ "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + 
"org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + 
"org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + 
"org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + 
"org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ 
unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.scheduler.mode": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing 
environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache 
fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save 
Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite 
@ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + 
"org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom 
ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + 
"org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite 
@ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when 
stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't 
trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + 
"org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is 
pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", 
+ "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and 
decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with 
nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different 
thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + 
"org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based 
RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is 
disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + 
"org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + 
"org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of 
intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs 
with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + 
"org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions 
that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure 
cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ 
spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote 
fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty 
RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier 
mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + 
"org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not 
taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on 
the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous 
indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle 
merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in 
OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + 
"org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in 
when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars 
shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization 
buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and 
KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + 
"org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + 
"org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + 
"org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is 
received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + 
"org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + 
"org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.appStatusStore.diskStoreDir": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ 
ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in 
ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ 
text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from 
unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", 
+ "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite 
@ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching 
multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact 
other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as 
dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly 
in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test 
RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + 
"org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only 
successful tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + 
"org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty 
partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.retainedStages": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor 
when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with 
shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite 
@ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + 
"org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ 
disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes 
from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", 
+ "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in 
saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at 
least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + 
"org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single 
task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle 
service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ 
FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite 
@ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 
4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task 
is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: 
don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", 
+ "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + 
"org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle 
checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and 
listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", 
+ "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ 
executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live 
= true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 
off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ 
kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.dagGraph.retainedRootRDDs": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local 
cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with 
executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak 
metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user 
exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent 
back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles 
should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + 
"org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier 
mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + 
"org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not 
taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on 
the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous 
indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle 
merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in 
OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + 
"org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in 
when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars 
shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization 
buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and 
KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + 
"org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + 
"org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ environment info", + 
"org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithInMemoryStoreSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ stage executor metrics", + 
"org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithLevelDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ environment info", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ scheduler events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage events", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction of old data", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect job completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect stage completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ skipped stages should be evicted before completed stages", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ eviction should respect task completion time", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ lastStageAttempt should fail when the stage doesn't exist", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-24415: update metrics for tasks that finish late", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = true)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ Total tasks in the executor summary should match total stage tasks (live = false)", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ driver logs", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ executor metrics updates", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ stage executor metrics", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ storage information on executor lost/down", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ clean up used memory when BlockManager added", + "org.apache.spark.status.AppStatusListenerWithRocksDBSuite @ SPARK-34877 - check YarnAmInfoEvent is populated correctly", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: 1 task", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: few tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: more tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: lots of tasks", + "org.apache.spark.status.AppStatusStoreSuite @ quantile calculation: custom quantiles", + "org.apache.spark.status.AppStatusStoreSuite @ quantile cache", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk leveldb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = disk rocksdb)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-26260: summary should contain only successful tasks' metrics (store = in memory live)", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary", + "org.apache.spark.status.AppStatusStoreSuite @ SPARK-36038: speculation summary should not be present if there are no speculative tasks", + 
"org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + 
"org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.StagePageSuite @ ApiHelper.COLUMN_TO_INDEX should match headers of the task table", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable 
closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of 
intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ 
verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in 
DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ 
SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ 
normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is 
less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ 
indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ 
SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + 
"org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run 
trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback 
(checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer 
failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite 
@ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is 
cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly 
in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test 
RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster 
mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should 
display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.io.numConnectionsPerPeer": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots 
than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract 
mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object 
files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + 
"org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation 
- caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup 
standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ 
sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single 
plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure 
and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages 
during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits 
are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect 
only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor 
added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as 
success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with 
different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool 
implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel 
zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle 
registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars 
should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and 
sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.scheduler.resource.profileMergeConflicts": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ 
the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using 
mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + 
"org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread 
while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative 
message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if 
all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + 
"org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR 
Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ 
SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task 
is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: 
don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", 
+ "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + 
"org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle 
checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and 
listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ 
SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block 
replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + 
"org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during 
iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.readHostLocalDisk": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + 
"org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors 
[local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + 
"org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + 
"org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite 
@ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources 
registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can 
be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + 
"org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + 
"org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial 
shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding 
ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks 
in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't 
call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers 
(isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled 
right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed 
with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - 
Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ 
[SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice 
with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar 
shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite 
@ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + 
"org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify 
webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], 
+ "spark.cleaner.referenceTracking.blocking": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + 
"org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + 
"org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop 
API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() 
throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + 
"org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + 
"org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort 
descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ 
excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer 
tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not 
reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay 
scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in 
total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with 
parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", 
+ "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ 
metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ 
SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ 
SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + 
"org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + 
"org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.port": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + 
"org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from 
multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure 
unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: 
external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ 
HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions 
inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + 
"org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort 
descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + 
"org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle 
file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped 
stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + 
"org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.memory.storageFraction": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast 
after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + 
"org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorMetricsPollerSuite @ SPARK-34779: stage entry shouldn't be removed before a heartbeat occurs", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot 
find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test 
binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.memory.TestMemoryManagerSuite @ tracks allocated execution memory by task", + "org.apache.spark.memory.TestMemoryManagerSuite @ markconsequentOOM", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ single task requesting on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks requesting full on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks cannot grow past 1 / N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks can block to get at least 1 / 2N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-35486: memory freed by self-spilling is taken by another task", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ TaskMemoryManager.cleanUpAllAllocatedMemory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks should not be granted a negative amount of execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ off-heap execution allocations cannot exceed limit", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic storage memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution evicts storage", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution memory requests smaller than free memory 
should evict storage (SPARK-12165)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ storage does not evict execution", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ small heap", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ insufficient executor memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution can evict cached blocks when there are multiple active tasks (SPARK-12155)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-15260: atomically resize memory pools", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ not enough free memory in the storage pool --OFF_HEAP", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic 
functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports 
mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ 
takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not 
partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when 
enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls 
TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches 
previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + 
"org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + 
"org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", 
+ "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with 
different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", 
+ "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running 
task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails 
(stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ 
[SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + 
"org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + 
"org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.memory.offHeap.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains 
an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable 
checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + 
"org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use 
resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorMetricsPollerSuite @ SPARK-34779: stage entry shouldn't be removed before a heartbeat occurs", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", 
+ "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite 
@ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.memory.TestMemoryManagerSuite @ tracks allocated execution memory by task", + "org.apache.spark.memory.TestMemoryManagerSuite @ markconsequentOOM", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ single task requesting on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks requesting full on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks cannot grow past 1 / N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks can block to get at least 1 / 2N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-35486: memory freed by self-spilling is taken by another task", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ TaskMemoryManager.cleanUpAllAllocatedMemory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks should not be granted a negative amount of execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ off-heap execution allocations cannot exceed limit", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic storage memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution evicts storage", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution memory requests smaller than free memory should evict storage (SPARK-12165)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ storage does not evict execution", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ small heap", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ insufficient executor memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution can evict cached blocks when there are multiple active tasks (SPARK-12155)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ 
SPARK-15260: atomically resize memory pools", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ not enough free memory in the storage pool --OFF_HEAP", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly 
handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ 
indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ 
repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ 
multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive 
getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared 
stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the 
corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage 
exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of 
StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler 
should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or 
TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo 
with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle 
non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + 
"org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ 
SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + 
"org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached 
blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + 
"org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + 
"org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is false", + "org.apache.spark.util.UtilsSuite @ executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is true" + ], + "spark.port.maxRetries": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit 
a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + 
"org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from 
AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should behaves the same as REST API when filtering applications", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad 
paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ 
scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + 
"org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero 
exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + 
"org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite 
@ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + 
"org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism 
is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + 
"org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + 
"org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one 
partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage 
id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when 
enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls 
TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches 
previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + 
"org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized 
path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any 
exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should 
be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x 
replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test 
putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + 
"org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ jetty selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty with https selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ jetty with https binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ add and remove handlers with custom user filter", + "org.apache.spark.ui.UISuite @ SPARK-32467: Avoid encoding URL twice on https redirect", + "org.apache.spark.ui.UISuite @ http -> https redirect applies to all URIs", + "org.apache.spark.ui.UISuite @ specify both http and https ports separately", + "org.apache.spark.ui.UISuite @ redirect with proxy server support", + "org.apache.spark.ui.UISuite @ SPARK-34449: Jetty 9.4.35.v20201120 and later no longer return status code 302 and handle internally when request URL ends with a context path without trailing '/'", + "org.apache.spark.ui.UISuite @ SPARK-34449: default thread pool size of different jetty servers", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys 
and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting 
with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.driver.blockManager.port": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that 
requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", 
+ "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + 
"org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ 
merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread 
while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative 
message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if 
all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + 
"org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR 
Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ 
SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is 
provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + 
"org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based 
shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext 
after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create 
SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block 
manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should 
show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before 
calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java 
ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.driver.host": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + 
"org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD 
[reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should behaves the same as REST API when filtering applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last 
attempt id", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching 
driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ 
recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ 
SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user 
from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original 
buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed 
distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ 
repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + 
"org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry 
all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in 
listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = 
false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and 
no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when 
one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with 
collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract 
mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle 
with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in 
local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 
addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + 
"org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should 
not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation 
with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.eventLog.compress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ events for finished job are dropped in new compact file", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ Don't compact file if score is lower than threshold", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ rewrite files with test filters", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps 
get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Log overwriting", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Log overwriting", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ 
accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ 
fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", 
+ "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + 
"org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + 
"org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple 
job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an 
overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with 
shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + 
"org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid 
spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when 
isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task 
fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for 
speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", 
+ "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + 
"org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + 
"org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + 
"org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude 
param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager 
decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = 
false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ 
Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to 
the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver 
logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.kryoserializer.buffer.max": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on 
executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup 
broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + 
"org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", 
+ "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", 
+ "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first 
arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race 
between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map 
outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it 
exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + 
"org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization 
exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer supports relocation when auto-reset is enabled", + "org.apache.spark.serializer.SerializerPropertiesSuite @ KryoSerializer does not support relocation when auto-reset is disabled", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + 
"org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ supported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + 
"org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are 
never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register 
and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure 
whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct 
BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching 
(encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + 
"org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + "org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + 
"org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing 
SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually 
cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.memory.offHeap.size": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage 
that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint 
compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + 
"org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorMetricsPollerSuite @ SPARK-34779: stage entry shouldn't be removed before a heartbeat occurs", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after 
unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot 
find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ 
portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.memory.TestMemoryManagerSuite @ tracks allocated execution memory by task", + "org.apache.spark.memory.TestMemoryManagerSuite @ markconsequentOOM", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ single task requesting on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks requesting full on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ two tasks cannot grow past 1 / N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks can block to get at least 1 / 2N of on-heap execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-35486: memory freed by self-spilling is taken by another task", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ TaskMemoryManager.cleanUpAllAllocatedMemory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ tasks should not be granted a negative amount of execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ off-heap execution allocations cannot exceed limit", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic execution memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ basic storage memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution evicts storage", + 
"org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution memory requests smaller than free memory should evict storage (SPARK-12165)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ storage does not evict execution", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ small heap", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ insufficient executor memory", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ execution can evict cached blocks when there are multiple active tasks (SPARK-12155)", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ SPARK-15260: atomically resize memory pools", + "org.apache.spark.memory.UnifiedMemoryManagerSuite @ not enough free memory in the storage pool --OFF_HEAP", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + 
"org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe 
exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + 
"org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly 
sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow 
ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + 
"org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + 
"org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset 
timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable 
output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo 
false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a 
time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of 
the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x 
replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 
block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and 
memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + 
"org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.MemoryStoreSuite @ reserve/release unroll memory", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsValues off-heap", + "org.apache.spark.storage.MemoryStoreSuite @ safely unroll blocks through putIteratorAsBytes", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.valuesIterator", + "org.apache.spark.storage.MemoryStoreSuite @ PartiallySerializedBlock.finishWritingToStream", + 
"org.apache.spark.storage.MemoryStoreSuite @ multiple unrolls by the same thread", + "org.apache.spark.storage.MemoryStoreSuite @ lazily create a big ByteBuffer to avoid OOM if it cannot be put into MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ put a small ByteBuffer to MemoryStore", + "org.apache.spark.storage.MemoryStoreSuite @ SPARK-22083: Release all locks in evictBlocksToFreeSpace", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and remove", + "org.apache.spark.storage.MemoryStoreSuite @ put user-defined objects to MemoryStore and clear", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + 
"org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty 
data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is false", + "org.apache.spark.util.UtilsSuite @ 
executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is true" + ], + "spark.dynamicAllocation.enabled": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast 
after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + 
"org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId 
condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop 
API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() 
throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + 
"org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + 
"org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort 
descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should 
respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 
0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack 
trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + 
"org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid 
spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task 
exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + 
"org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + 
"org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite 
@ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + 
"org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add 
jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ 
test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + 
"org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + 
"org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isDynamicAllocationEnabled" + ], + "spark.serializer.objectStreamReset": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ TorrentBroadcast's blockifyObject and unblockifyObject are inverses", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ 
Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + 
"org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + 
"org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation 
default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ 
caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option with resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile 
(compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + 
"org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: 
master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockRpcServerSuite @ SPARK-38830: 
Rethrow IllegalArgumentException due to `Unknown message type`", + "org.apache.spark.network.netty.NettyBlockRpcServerSuite @ SPARK-38830: Warn and ignore NegativeArraySizeException due to the corruption", + "org.apache.spark.network.netty.NettyBlockRpcServerSuite @ SPARK-38830: Warn and ignore IndexOutOfBoundsException due to the corruption", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + 
"org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if 
all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + 
"org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw 
exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files 
lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on 
exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles 
returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle 
merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ MapStatus should never report non-empty blocks' sizes as 0", + "org.apache.spark.scheduler.MapStatusSuite @ HighlyCompressedMapStatus: estimated size should be the average non-empty block size", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-22540: ensure HighlyCompressedMapStatus calculates correct avgSize", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-36967: HighlyCompressedMapStatus should record accurately the size of skewed shuffle blocks", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-36967: Limit accurate skewed block number if too many blocks are skewed", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO 
Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than 
maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.JavaSerializerSuite @ JavaSerializer instances are serializable", + "org.apache.spark.serializer.JavaSerializerSuite @ Deserialize object containing a primitive Class as attribute", + "org.apache.spark.serializer.JavaSerializerSuite @ SPARK-36627: Deserialize object containing a proxy Class as attribute", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite 
@ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.SerializerPropertiesSuite @ JavaSerializer does not support relocation", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ 
SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error logging", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block push fails with too late exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + 
"org.apache.spark.shuffle.sort.SortShuffleManagerSuite @ unsupported shuffle dependencies for serialized shuffle", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ Test byteString conversion", + "org.apache.spark.SparkConfSuite @ Test timeString conversion", + "org.apache.spark.SparkConfSuite @ loading from system properties", + "org.apache.spark.SparkConfSuite @ initializing without loading defaults", + "org.apache.spark.SparkConfSuite @ named set methods", + "org.apache.spark.SparkConfSuite @ basic get and set", + "org.apache.spark.SparkConfSuite @ basic getAllWithPrefix", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkConfSuite @ nested property names", + "org.apache.spark.SparkConfSuite @ Thread safeness - SPARK-5425", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses", + "org.apache.spark.SparkConfSuite @ register kryo classes through registerKryoClasses and custom registrator", + "org.apache.spark.SparkConfSuite @ register kryo classes through conf", + "org.apache.spark.SparkConfSuite @ deprecated configs", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger 
than spark.executor.heartbeatInterval", + "org.apache.spark.SparkConfSuite @ SPARK-26998: SSL configuration not needed on executors", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsSeconds with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default long throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes with default string throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getDouble throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getTimeAsMs with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsBytes throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsGb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getInt throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsMb with default throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getSizeAsKb throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getBoolean throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ SPARK-24337: getLong throws an useful error message with key name", + "org.apache.spark.SparkConfSuite @ get task resource requirement from config", + "org.apache.spark.SparkConfSuite @ test task resource requirement with 0 amount", + "org.apache.spark.SparkConfSuite @ Ensure that we can configure fractional resources for a task", + "org.apache.spark.SparkConfSuite @ Non-task resources are never fractional", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster 
mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 
off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of 
multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if 
a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ verify write metrics on revert", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ Reopening a closed block writer", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a partial write should truncate up to commit", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() after commit() should have no effect", + 
"org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling revertPartialWritesAndClose() on a closed block writer should have no effect", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ revertPartialWritesAndClose() should be idempotent", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ commit() and close() without ever opening or writing", + "org.apache.spark.storage.DiskBlockObjectWriterSuite @ calling closeAndDelete() on a partial write file", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ valuesIterator() and finishWritingToStream() cannot be called after discard() is called", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ discard() can be called more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() more than once", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call finishWritingToStream() after valuesIterator()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ cannot call valuesIterator() after finishWritingToStream()", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ buffers are deallocated in a TaskCompletionListener", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ basic numbers with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes 
with finishWritingToStream() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 50", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with discard() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with finishWritingToStream() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ case classes with valuesIterator() and numBuffered = 1000", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with discard() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with finishWritingToStream() and numBuffered = 0", + "org.apache.spark.storage.PartiallySerializedBlockSuite @ empty iterator with valuesIterator() and numBuffered = 0", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + 
"org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill 
for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ isPushBasedShuffleEnabled when PUSH_BASED_SHUFFLE_ENABLED and SHUFFLE_SERVICE_ENABLED are both set to true in YARN mode with maxAttempts set to 1" + ], + "spark.executor.userClassPathFirst": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast 
value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in 
shells", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option with resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage 
job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + 
"org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + 
"org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + 
"org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + 
"org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range 
of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by 
TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in 
submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude 
nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + 
"org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage 
excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task 
exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + 
"org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", 
+ "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application 
attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block 
replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ 
live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions 
with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.heartbeatInterval": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation 
enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ 
BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic 
allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + 
"org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + 
"org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + 
"org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner 
preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching 
before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + 
"org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ 
sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + 
"org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle 
merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are 
updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a 
new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only 
have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite 
@ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and 
KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master and app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext without master", + "org.apache.spark.SparkConfSuite @ creating SparkContext without app name", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext 
property overriding", + "org.apache.spark.SparkConfSuite @ encryption requires authentication", + "org.apache.spark.SparkConfSuite @ spark.network.timeout should bigger than spark.executor.heartbeatInterval", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling 
multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store 
application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite 
@ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ 
accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise 
exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.ui.timeline.stages.maximum": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + 
"org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from 
multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure 
unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: 
external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ 
HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions 
inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + 
"org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort 
descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + 
"org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle 
file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped 
stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + 
"org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.push.maxRetainedMergerLocations": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ 
Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically 
cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + 
"org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop 
API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() 
throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + 
"org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + 
"org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + 
"org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort 
descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ 
excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer 
tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not 
reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be 
scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + 
"org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ 
addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test 
transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations 
can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + 
"org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels 
(encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block 
if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display 
useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.cleaner.periodicGC.interval": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than 
current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract 
mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object 
files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", 
+ "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in 
saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at 
least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + 
"org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single 
task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle 
service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ 
FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite 
@ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 
4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised 
consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex 
transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + 
"org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without 
compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + 
"org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive 
unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ 
test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + 
"org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + 
"org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.network.io.preferDirectBufs": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + 
"org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + 
"org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + 
"org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + 
"org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + 
"org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no 
execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + 
"org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ 
SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ 
HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + 
"org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ 
take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + 
"org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL 
encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be 
launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier 
task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based 
shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on 
that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ 
SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task 
was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + 
"org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ 
metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted 
literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: 
addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + 
"org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + 
"org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ 
[SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with 
stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't 
fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without 
breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.io.retryWait": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast 
variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + 
"org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host 
must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage 
if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + 
"org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators 
in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite 
@ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching 
multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact 
other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as 
dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason 
unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is 
submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite 
@ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: 
OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + 
"org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression 
settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite 
@ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block 
replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + 
"org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ 
test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are 
skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced 
to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting 
contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.pyspark.memory": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from 
multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ 
PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + 
"org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing no resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing one resource", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ parsing multiple resources", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ executor resource found less than required", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle 
serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile 
(compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ 
SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records 
read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async 
success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks 
exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite 
@ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + 
"org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn no dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported yarn with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported k8s with dynamic allocation", + "org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported standalone with dynamic allocation", + 
"org.apache.spark.resource.ResourceProfileManagerSuite @ isSupported with local mode", + "org.apache.spark.resource.ResourceProfileManagerSuite @ ResourceProfileManager has equivalent profile", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile", + "org.apache.spark.resource.ResourceProfileSuite @ Executor cores should be None by default for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Get resource for standalone cluster", + "org.apache.spark.resource.ResourceProfileSuite @ Default ResourceProfile with app level resources specified", + "org.apache.spark.resource.ResourceProfileSuite @ test default profile task gpus fractional", + "org.apache.spark.resource.ResourceProfileSuite @ maxTasksPerExecutor/limiting no other resource no executor cores", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / 
when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt 
don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs 
when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not 
re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task 
exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could 
fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ 
kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one 
task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ 
SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for 
newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill 
missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite 
@ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster 
with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains 
PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - 
Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ No event log files", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ events for finished job are dropped in new compact file", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ Don't compact file if score is lower than threshold", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ rewrite files with test filters", + "org.apache.spark.deploy.history.HistoryServerPageSuite @ SPARK-39620: should behaves the same as REST API when filtering applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ history file is renamed from inprogress to completed", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39439: Check final file if in-progress event log file does not exist", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Parse logs that application is not started", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-5582: empty log directory", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log cleaner", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ should not clean inprogress application with lastUpdated time less than maxTime", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log cleaner for inProgress files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ Event log copy", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ 
driver log cleaner", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-8372: new logs with no app ID are ignored", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ provider correctly checks whether fs is in safe mode", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ provider waits for safe mode to finish before initializing", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ provider reports error after FS leaves safe mode", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ ignore hidden files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ mismatched version discards old listing", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-21571: clean up removes invalid history files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ always find end event for finished apps", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ parse event logs with optimizations off", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-24948: ignore files we don't have read permission on", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ check in-progress event logs absolute length", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ log cleaner with the maximum number of log files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-29043: clean up specified event log", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33215: check ui view permissions without retrieving ui", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite 
@ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = true)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse application logs (inMemory = false)", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-31608: parse application logs with HybridStore", + 
"org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ history file is renamed from inprogress to completed", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39439: Check final file if in-progress event log file does not exist", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Parse logs that application is not started", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-5582: empty log directory", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ apps with multiple attempts with order", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log urls without customization", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ custom log urls with invalid attribute", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log cleaner", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ should not clean inprogress application with lastUpdated time less than maxTime", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log cleaner for inProgress files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ Event log copy", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ driver log cleaner", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-8372: new logs with no app ID are ignored", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ provider correctly checks whether fs is in safe mode", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ provider waits for safe mode to finish before initializing", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ provider reports error after FS leaves safe mode", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ ignore hidden files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ support history server ui admin acls", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ mismatched version discards old listing", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ invalidate cached UI", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ clean up stale app information", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-21571: clean up removes invalid history files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ always find end event for finished apps", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ parse event logs with optimizations off", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-24948: ignore files we don't have read permission on", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ check in-progress event logs absolute length", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ log cleaner with the maximum number of log files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-29043: clean up specified event log", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + 
"org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33215: check ui view permissions without retrieving ui", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ application list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ completed app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ running app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxDate2 app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minEndDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ minDate and maxEndDate app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ limit app list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one app multi-attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ job list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one job json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ succeeded&failed job list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor list with executor metrics json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ complete stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ failed stage list json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with details", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage attempt json details with failed task", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one stage json with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle write", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task summary w shuffle read", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ 
stage task summary w/ custom quantiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: -runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & offset & length", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list w/ status & sortBy short names: runtime", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list with partitionId", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with accumulable json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(1)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage task list from multi-attempt app json(2)", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ excludeOnFailure node for stage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ rdd list storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor node excludeOnFailure unexcluding", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor memory usage", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ executor resource information", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage list with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with peak metrics", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with summaries", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ one rdd storage json", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ miscellaneous process", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ stage with speculation summary", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download all logs for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ download one log for app with multiple attempts", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ response codes on bad paths", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ automatically retrieve uiRoot from request through Knox", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ static relative links are prefixed with uiRoot (spark.ui.proxyBase)", + 
"org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ /version api endpoint", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ security manager starts with spark.authenticate set", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ ui and api authorization checks", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-33215: speed up event log download by skipping UI rebuild", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ access history application defaults to the last attempt id", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ SPARK-31697: HistoryServer should set Content-Type", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ Redirect to the root page when accessed to /history/", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ Retrieve EventLogFileReader correctly", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Event log names", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Log overwriting", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Log overwriting", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Event log name", + "org.apache.spark.deploy.history.SingleFileEventLogFileReaderSuite @ Retrieve EventLogFileReader correctly", + "org.apache.spark.deploy.JsonProtocolSuite @ writeWorkerState", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect 
SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ 
assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill application", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ kill driver", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill one host", + "org.apache.spark.deploy.master.ui.MasterWebUISuite @ Kill multiple hosts", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.security.HadoopDelegationTokenManagerSuite @ SPARK-29082: do not fail if current user does not have credentials", + "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with propagation and defaults", + "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with S3A endpoint set to empty string", + "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with S3A options explicitly set", + "org.apache.spark.deploy.SparkHadoopUtilSuite @ appendSparkHadoopConfigs with S3A endpoint region set to an empty string", + "org.apache.spark.deploy.SparkSubmitSuite @ handle binary specified but not class", + "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ error informatively when mainClass isn't set and S3 JAR doesn't exist", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ ambiguous 
archive mapping results in error message", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves config paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ support glob path", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-27575: yarn confs should merge new value with existing value", + "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties", + "org.apache.spark.deploy.SparkSubmitSuite @ handles natural line delimiters in --properties-file and --conf uniformly", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.CommandUtilsSuite @ set libraryPath correctly", + "org.apache.spark.deploy.worker.CommandUtilsSuite @ auth secret shouldn't appear in java opts", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process succeeds instantly", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process failing several times and then succeeding", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process doesn't restart if not supervised", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process doesn't restart if killed", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Reset of backoff counter", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Kill process finalized with state KILLED", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Finalized with state FINISHED", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Finalized with state FAILED", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Handle exception starting process", + "org.apache.spark.deploy.worker.ExecutorRunnerTest @ command includes appId", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while 
launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - 
snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement 
(SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: 
get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with default config", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with sources add", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Driver instance", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Driver instance and spark.app.id is not set", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Driver instance and spark.executor.id is not set", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance and spark.app.id is not set", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with Executor instance 
and spark.executor.id is not set", + "org.apache.spark.metrics.MetricsSystemSuite @ MetricsSystem with instance which is neither Driver nor Executor", + "org.apache.spark.metrics.MetricsSystemSuite @ SPARK-37078: Support old 3-parameter Sink constructors", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a random port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to two random ports", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ can bind to a specific port twice and the second increments", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ 
serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite 
@ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by 
barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job 
failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets 
update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block 
FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Event logging with password redaction", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ Simple replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ Replay compressed inprogress log file succeeding on partial read", + "org.apache.spark.scheduler.ReplayListenerSuite @ Replay incompatible event log", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values 
count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort 
timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.SecurityManagerSuite @ set security with conf", + "org.apache.spark.SecurityManagerSuite @ set security with conf for groups", + "org.apache.spark.SecurityManagerSuite @ set security with api", + "org.apache.spark.SecurityManagerSuite @ set security with api for groups", + "org.apache.spark.SecurityManagerSuite @ set security modify acls", + "org.apache.spark.SecurityManagerSuite @ set security modify acls for groups", + "org.apache.spark.SecurityManagerSuite @ set security admin acls", + "org.apache.spark.SecurityManagerSuite @ set security admin acls for groups", + "org.apache.spark.SecurityManagerSuite @ set security with * in acls", + "org.apache.spark.SecurityManagerSuite @ set security with * in acls for groups", + 
"org.apache.spark.SecurityManagerSuite @ security for groups default behavior", + "org.apache.spark.SecurityManagerSuite @ missing secret authentication key", + "org.apache.spark.SecurityManagerSuite @ secret authentication key", + "org.apache.spark.SecurityManagerSuite @ secret file must be defined in both driver and executor", + "org.apache.spark.SecurityManagerSuite @ master yarn cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ master local cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ master local[*] cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ master mesos://localhost:8080 cannot use file mounted secrets", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'yarn'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'local[*]'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'k8s://127.0.0.1'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'k8s://127.0.1.1'", + "org.apache.spark.SecurityManagerSuite @ secret key generation: master 'invalid'", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large 
array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", 
+ "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ 
shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path 
contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration 
(enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with 
serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger 
than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.DiskStoreSuite @ reads of memory-mapped and non memory-mapped files are equivalent", + "org.apache.spark.storage.DiskStoreSuite @ block size tracking", + "org.apache.spark.storage.DiskStoreSuite @ blocks larger than 2gb", + "org.apache.spark.storage.DiskStoreSuite @ block data encryption", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a 
different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.HttpSecurityFilterSuite @ filter bad user input", + "org.apache.spark.ui.HttpSecurityFilterSuite @ perform access control", + "org.apache.spark.ui.HttpSecurityFilterSuite @ set security-related headers", + "org.apache.spark.ui.HttpSecurityFilterSuite @ doAs impersonation", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ jetty selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty with https selects different port under contention", + "org.apache.spark.ui.UISuite @ jetty binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ jetty with https binds to port 0 correctly", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ add and remove handlers with custom user filter", + "org.apache.spark.ui.UISuite @ SPARK-32467: Avoid encoding URL twice on https redirect", + "org.apache.spark.ui.UISuite @ http -> https redirect applies to all URIs", + "org.apache.spark.ui.UISuite @ specify both http and https ports 
separately", + "org.apache.spark.ui.UISuite @ redirect with proxy server support", + "org.apache.spark.ui.UISuite @ SPARK-34449: Jetty 9.4.35.v20201120 and later no longer return status code 302 and handle internally when request URL ends with a context path without trailing '/'", + "org.apache.spark.ui.UISuite @ SPARK-34449: default thread pool size of different jetty servers", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill 
for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing", + "org.apache.spark.util.UtilsSuite @ Set Spark CallerContext" + ], + "spark.ui.showConsoleProgress": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ 
Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + 
"org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + 
"org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + 
"org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite 
@ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources 
registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can 
be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + 
"org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + 
"org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial 
shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding 
ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks 
in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't 
call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers 
(isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with 
fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only 
node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset 
numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + 
"org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with 
collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract 
mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle 
with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in 
local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + 
"org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + 
"org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ 
unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.rpc.message.maxSize": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing 
environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Gracefully handle error in task deserialization", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.executor.ExecutorSuite @ SPARK-40235: updateDependencies is interruptible when waiting on lock", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task 
(SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file 
caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ min broadcast size exceeds max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ equally divide map statistics tasks", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", 
+ "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is 
set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + 
"org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite 
@ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching 
multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite 
@ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact 
other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as 
dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly 
in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test 
RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + 
"org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + 
"org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 
replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + 
"org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", 
+ "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ 
unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.sort.io.plugin.class": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing 
environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache 
fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save 
Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite 
@ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + 
"org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom 
ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + 
"org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite 
@ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ 
only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via 
spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule 
more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task 
was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + 
"org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ 
metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted 
literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: 
addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + 
"org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ 
toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in 
local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.push.finalize.timeout": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains 
PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + 
"org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ 
decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle 
FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + 
"org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + 
"org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + 
"org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + 
"org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + 
"org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + 
"org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range 
of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are 
registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + 
"org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after 
failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new 
executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor 
for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record 
serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + 
"org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and 
KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + 
"org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted 
exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers 
with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block 
replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + 
"org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ 
multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or 
partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.rpc.lookupTimeout": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total 
under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + 
"org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + 
"org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + 
"org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + 
"org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin 
initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite 
@ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism 
is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + 
"org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + 
"org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one 
partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage 
id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when 
enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls 
TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set 
prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches 
previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + 
"org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized 
path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any 
exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should 
be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x 
replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + 
"org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test 
putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + 
"org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null 
keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.rpc.askTimeout": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting 
TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + 
"org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ FileSystemPersistenceEngine", + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission with multiple masters", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission from main method", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ request submission status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ kill or request status before create", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ bad request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in 
shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher shuts down on valid disassociation", + "org.apache.spark.deploy.worker.WorkerWatcherSuite @ WorkerWatcher stays alive on invalid disassociation", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + 
"org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ track allocated resources by taskId", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk 
corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory 
(old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + 
"org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ master start and stop", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and fetch", + "org.apache.spark.MapOutputTrackerSuite @ master register and unregister shuffle", + "org.apache.spark.MapOutputTrackerSuite @ master register shuffle and unregister map output and fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ getLocationsWithLargestOutputs with multiple outputs in same machine", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ zero-sized blocks should be excluded when getMapSizesByExecutorId", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: master register and unregister merge result", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-39553: Multi-thread unregister shuffle shouldn't throw NPE", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - 
simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining 
iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with 
known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + 
"org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a RpcEndpointRef", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message locally", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + 
"org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onStart and onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ onError: error in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStart", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in receive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ self: call in onStop", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ call receive in sequence", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ stop(RpcEndpointRef) reentrant", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ port conflict", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ construct RpcTimeout with conf property", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout on Future using RpcTimeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ isolated endpoints", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ non-existent endpoint", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ advertise address different from bind address", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ RequestMessage serialization", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share 
messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job 
cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext 
(SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge 
results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + 
"org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + 
"org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: 
iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not 
be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an 
executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw 
proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle 
reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ 
local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster 
mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block 
replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerMasterSuite @ SPARK-31422: getMemoryStatus should not fail after BlockManagerMaster stops", + "org.apache.spark.storage.BlockManagerMasterSuite @ SPARK-31422: getStorageStatus should not fail after BlockManagerMaster stops", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block 
replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-36036: make sure temporary download files are deleted", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: count failures from active executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32091: ignore failures from lost executors when remove rdd/broadcast/shuffle", + "org.apache.spark.storage.BlockManagerSuite @ StorageLevel object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId object caching", + "org.apache.spark.storage.BlockManagerSuite @ BlockManagerId.isDriver() with DRIVER_IDENTIFIER (SPARK-27090)", + "org.apache.spark.storage.BlockManagerSuite @ master + 1 manager interaction", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ removing block", + "org.apache.spark.storage.BlockManagerSuite @ removing rdd", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on heart beat", + "org.apache.spark.storage.BlockManagerSuite @ reregistration on block update", + "org.apache.spark.storage.BlockManagerSuite @ reregistration doesn't dead lock", + "org.apache.spark.storage.BlockManagerSuite @ correct BlockResult returned from get() calls", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks without topology information", + "org.apache.spark.storage.BlockManagerSuite @ optimize a location order of blocks with topology information", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with serialization", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU storage with off-heap", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of same RDD", + 
"org.apache.spark.storage.BlockManagerSuite @ in-memory LRU for partitions of multiple RDDs", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ on-disk storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and memory storage with serialization and getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ disk and off-heap memory storage with getLocalBytes (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ in-memory LRU with streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ LRU with mixed storage levels and streams (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ negative byte values in ByteBufferInputStream", + "org.apache.spark.storage.BlockManagerSuite @ overly large block", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ block store put failure", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = off)", + "org.apache.spark.storage.BlockManagerSuite @ test putBlockDataAsStream with caching on disk (encryption = on)", + "org.apache.spark.storage.BlockManagerSuite @ turn off updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ updated block statuses", + "org.apache.spark.storage.BlockManagerSuite @ query block statuses", + "org.apache.spark.storage.BlockManagerSuite @ get matching blocks", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-1194 regression: fix the same-RDD rule for cache replacement", + "org.apache.spark.storage.BlockManagerSuite @ safely unroll blocks through putIterator (disk)", + "org.apache.spark.storage.BlockManagerSuite @ read-locked blocks cannot be evicted 
from memory", + "org.apache.spark.storage.BlockManagerSuite @ remove block if a read fails due to missing DiskStore files (SPARK-15736)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should fail after hitting a threshold)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-13328: refresh block locations (fetch should succeed after location refresh)", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: block status is properly updated following an exception in put()", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-25888: serving of removed file not detected by shuffle service", + "org.apache.spark.storage.BlockManagerSuite @ test sorting of block locations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ fetch remote block to local disk if block size is larger than threshold", + "org.apache.spark.storage.BlockManagerSuite @ query locations of blockIds", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-30594: Do not post SparkListenerBlockUpdated when updateBlockInfo returns false", + "org.apache.spark.storage.BlockManagerSuite @ we reject putting blocks when we have the wrong shuffle resolver", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Shuffle push merger locations should be bounded with in spark.shuffle.push.retainedMergerLocations", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-32919: Prefer active executor locations for shuffle push mergers", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when getting disk blocks and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ check KryoException when saving blocks into memory and 'Input/output error' is occurred", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ fallback storage APIs - copy/exists", + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-39200: fallback storage APIs - readFully", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + 
"org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ 
unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.executor.extraClassPath": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + 
"org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid 
broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing 
environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache 
fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save 
Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite 
@ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ 
RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + 
"org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom 
ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + 
"org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the 
storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous 
indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle 
merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should 
not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + 
"org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is 
unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 
locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as 
success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with 
different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool 
implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ 
subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + 
"org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel 
zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block 
replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + 
"org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + 
"org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo 
ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.archives": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than 
current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + 
"org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves config paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ support glob path", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-27575: yarn confs should merge new value with existing value", + "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties", + "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", + "org.apache.spark.deploy.SparkSubmitSuite @ handles natural line delimiters in --properties-file and --conf uniformly", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + 
"org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution 
of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + 
"org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no 
files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + 
"org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock 
release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling 
for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow 
ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + 
"org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + 
"org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset 
timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable 
output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + 
"org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero 
sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + 
"org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ Test parsing resources task configs with missing executor config", + "org.apache.spark.SparkContextSuite @ Test parsing resources executor config < task requirements", + "org.apache.spark.SparkContextSuite @ Parse resources executor config not the same multiple numbers of the task requirements", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency 
jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + 
"org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ 
live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.extraListeners": [ + "org.apache.spark.api.python.PythonBroadcastSuite @ PythonBroadcast can be serialized with Kryo (SPARK-4882)", + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with partial partitions", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with union()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with coalesce()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that contains an RDD that depends on multiple barrier RDDs", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast 
variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Broadcast variables cannot be created after SparkContext is stopped (SPARK-5065)", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [local checkpoint]", + 
"org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + 
"org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with 
aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ failure because task closure is not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + 
"org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.HeartbeatReceiverSuite @ task scheduler is set correctly", + "org.apache.spark.HeartbeatReceiverSuite @ normal heartbeat", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if scheduler is not ready yet", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from unregistered executor", + "org.apache.spark.HeartbeatReceiverSuite @ reregister if heartbeat from removed executor", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.HeartbeatReceiverSuite @ SPARK-34273: Do not reregister BlockManager when SparkContext is stopped", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization and communication", + "org.apache.spark.internal.plugin.PluginContainerSuite @ merging of config options", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.ChunkedByteBufferSuite @ 
no chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ getChunks() duplicates chunks", + "org.apache.spark.io.ChunkedByteBufferSuite @ copy() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ writeFully() does not affect original buffer's position", + "org.apache.spark.io.ChunkedByteBufferSuite @ SPARK-24107: writeFully() write buffer which is larger than bufferWriteChunkSize", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray()", + "org.apache.spark.io.ChunkedByteBufferSuite @ toArray() throws UnsupportedOperationException if size exceeds 2GB", + "org.apache.spark.io.ChunkedByteBufferSuite @ toInputStream()", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with 
new Hadoop API in different thread", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping static sources registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for adding ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ Test configuration for skipping ExecutorMetrics source registration", + "org.apache.spark.metrics.source.SourceConfigSuite @ SPARK-31711: Test executor source registration in local mode", + "org.apache.spark.PartitioningSuite @ HashPartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner.determineBounds", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ partitioner preservation", + "org.apache.spark.PartitioningSuite @ partitioning Java arrays should fail", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ transform storage level", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses partition size", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + 
"org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions inherit locality prefs correctly", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDBarrierSuite @ create an RDDBarrier in the middle of a chain of RDDs", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier with shuffle", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDOperationScopeSuite @ equals and hashCode", + "org.apache.spark.rdd.RDDOperationScopeSuite @ getAllScopes", + "org.apache.spark.rdd.RDDOperationScopeSuite @ json de/serialization", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with partial nesting", + "org.apache.spark.rdd.RDDOperationScopeSuite @ withScope with multiple layers of nesting", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ serialization", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ UnionRDD partition serialized size should be small", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + 
"org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with limit 0", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ runJob on an invalid partition", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ SPARK-32384: repartitionAndSortWithinPartitions without shuffle", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ parent method", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with cycles", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-23496: order of input partitions can result in severe skew in coalesce", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: 
customize initialNumPartitions for take", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ cannot call methods on a stopped SparkContext (SPARK-5063)", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + 
"org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall 
abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with 
shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ExternalClusterManagerSuite @ launch of backend and scheduler", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only authorized committer failures can clear the authorized committer lock (SPARK-6614)", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-19631: Do not allow failed attempts to be authorized for committing", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Differentiate tasks from different stage attempts", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ SPARK-17663: FairSchedulableBuilder sets default values for blank or invalid datas", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should build fair scheduler when valid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should use default file(fairscheduler.xml) if it exists in classpath and spark.scheduler.allocation.file property is not set", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler should throw FileNotFoundException when invalid spark.scheduler.allocation.file property is set", + "org.apache.spark.scheduler.PoolSuite @ SPARK-35083: Support remote scheduler pool file", + 
"org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ failed tasks collect only accumulators whose values count during failures", + "org.apache.spark.scheduler.TaskContextSuite @ only updated internal accumulators will be sent back to driver", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set 
prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler 
correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be 
scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ calling deserialize() after deserializeStream()", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ SPARK-25786: ByteBuffer.array -- UnsupportedOperationException", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.KryoSerializerSuite @ basic types", + "org.apache.spark.serializer.KryoSerializerSuite @ pairs", + "org.apache.spark.serializer.KryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.KryoSerializerSuite @ Bug: SPARK-10251", + 
"org.apache.spark.serializer.KryoSerializerSuite @ ranges", + "org.apache.spark.serializer.KryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.KryoSerializerSuite @ custom registrator", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.KryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.KryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.KryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.KryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.KryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-7392 configuration limits", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ basic types", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ pairs", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Scala data structures", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ Bug: SPARK-10251", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ ranges", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ asJavaIterable", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ custom registrator", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with nonexistent custom registrator should fail", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ default class loader can be set by a different thread", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of TaskCommitMessage", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ serialization buffer overflow reporting", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ KryoOutputObjectOutputBridge.writeObject and KryoInputObjectInputBridge.readObject", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ getAutoReset", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25176 ClassCastException when writing a Map after previously reading a Map with different generic type", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-25839 KryoPool implementation works correctly in multi-threaded environment", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-27216: test RoaringBitmap ser/dser with Kryo", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ SPARK-37071: OpenHashMap serialize with reference tracking turned off", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDependencySuite @ combineByKey null combiner class tag handled correctly", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using 
mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable 
class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkConfSuite @ creating SparkContext with both master and app name", + "org.apache.spark.SparkConfSuite @ SparkContext property overriding", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs only returns RDDs that are marked as cached", + "org.apache.spark.SparkContextInfoSuite @ getPersistentRDDs returns an immutable map", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextInfoSuite @ call sites report correct locations", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-master", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-*-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n", + "org.apache.spark.SparkContextSchedulerCreationSuite @ bad-local-n-failures", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ Only one SparkContext may be active at a time", + "org.apache.spark.SparkContextSuite @ Can still construct a new SparkContext after failing to construct a previous one", + "org.apache.spark.SparkContextSuite @ Test getOrCreate", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ add and list jar files", + "org.apache.spark.SparkContextSuite @ add FS jar files not exists", + "org.apache.spark.SparkContextSuite @ SPARK-17650: malformed url's throw exceptions before bricking Executors", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ addFile recursive can't add directories by default", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + 
"org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-30126: add jar when path contains spaces", + "org.apache.spark.SparkContextSuite @ add jar with invalid path", + "org.apache.spark.SparkContextSuite @ SPARK-22585 addJar argument without scheme is interpreted literally without url decoding", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ log level case-insensitive and reset log level", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + 
"org.apache.spark.SparkContextSuite @ SPARK-34346: hadoop configuration priority for spark/hive/hadoop configs", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35383: Fill missing S3A magic committer configs if needed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if 
MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ get peers with addition and removal of block managers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication without peers", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access 
old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ThreadingSuite @ set local properties in different thread", + "org.apache.spark.ThreadingSuite @ set and get local properties in parent-children thread", + "org.apache.spark.ThreadingSuite @ mutation in parent local property does not affect child (SPARK-10563)", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ attaching and detaching a new tab", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ live UI json application list", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ description for empty jobs", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.ui.UISuite @ verify webUrl contains the scheme", + "org.apache.spark.ui.UISuite @ verify webUrl contains the port", + "org.apache.spark.ui.UISuite @ SPARK-36237: Attach and start handler after application started in UI", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields 
of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean basic nested non-serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested serializable closures", + "org.apache.spark.util.ClosureCleanerSuite2 @ clean complicated nested non-serializable closures", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.io.compression.lz4.blockSize": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a 
barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + 
"org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a 
single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and 
NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.CompressionCodecSuite @ default compression codec", + "org.apache.spark.io.CompressionCodecSuite @ lz4 compression codec", + "org.apache.spark.io.CompressionCodecSuite @ lz4 compression codec short form", + "org.apache.spark.io.CompressionCodecSuite @ lz4 supports concatenation of serialized streams", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ 
input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic 
operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for 
take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are 
busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to 
rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ Replay compressed inprogress log file succeeding on partial read", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: 
task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + 
"org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum 
detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles 
that use the deserialized path", + "org.apache.spark.SparkConfSuite @ SPARK-13727", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.log.callerContext": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + 
"org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", 
+ "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task 
is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + 
"org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated 
accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ 
SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different 
compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + 
"org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.files.ignoreCorruptFiles": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input 
as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)" + ], + "spark.broadcast.checksum": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + 
"org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD 
[reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment 
variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + 
"org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ 
spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite 
@ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with 
op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many 
partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should 
be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie 
tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: 
isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a 
running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + 
"org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ 
[SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite 
@ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.io.compression.codec": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ 
Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ 
automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a 
FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + 
"org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + 
"org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.io.CompressionCodecSuite @ default compression codec", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + 
"org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after 
checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + 
"org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + 
"org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for 
resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ 
SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ serializer manager integration", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ schema compression and decompression", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ uses schema fingerprint to decrease message size", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ caches previously seen schemas", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization", + 
"org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-39775: Disable validate default values when parsing Avro schemas", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + 
"org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no 
execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel 
zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerSuite @ removing broadcast", + "org.apache.spark.storage.BlockManagerSuite @ block compression", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-34193: Potential race condition during decommissioning with TorrentBroadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + 
"org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in 
local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.speculation.multiplier": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + 
"org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non 
serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk 
storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate 
working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator 
read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + 
"org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled 
tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire 
task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through 
KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + 
"org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ 
SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ 
SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a 
class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.speculation.efficiency.longRunTaskFactor": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per 
executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + 
"org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory 
(old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition 
listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort 
descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved 
accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + 
"org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", 
+ "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor 
added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as 
success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + 
"org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + 
"org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote 
storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.eventLog.longForm.enabled": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing 
partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed 
maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new 
Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + 
"org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + 
"org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption 
key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable 
pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot 
find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor 
after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + 
"org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.hadoop.validateOutputSpecs": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (old Hadoop 
API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()" + ], + "spark.hadoop.cloneConf": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + 
"org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + 
"org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)" + ], + "spark.speculation.quantile": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + 
"org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + 
"org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal 
accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + 
"org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - 
caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output 
location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + 
"org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we 
attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 
HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any 
resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark 
taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt 
task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test 
SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: 
shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE 
will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ 
stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.default.parallelism": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD 
does not throw an exception (SPARK-5441)", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Forbid broadcasting RDD directly", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ 
SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.ImplicitOrderingSuite @ basic inference of Orderings", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.PartitioningSuite @ defaultPartitioner", + "org.apache.spark.PartitioningSuite @ defaultPartitioner when defaultParallelism is set", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidBucketArray", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates UnionRDD if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners", + "org.apache.spark.rdd.RDDSuite @ PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner", + "org.apache.spark.rdd.RDDSuite @ SPARK-23778: empty RDD in union should not produce a UnionRDD", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ sample preserves partitioner", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ cartesian on empty RDD", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ cannot run actions after SparkContext has been stopped (SPARK-5063)", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.TaskContextSuite @ provide 
metrics sources", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ throws expected serialization exceptions on actions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitions transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ map transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ filter transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ flatMap transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.ProactiveClosureSerializationSuite @ mapPartitionsWithIndex transformations throw proactive serialization exceptions", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-default-parallelism", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not 
throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ toplevel return statements in closures are identified at cleaning time", + "org.apache.spark.util.ClosureCleanerSuite @ return statements from named functions nested in closures don't raise exceptions", + "org.apache.spark.util.ClosureCleanerSuite @ user provided closures are actually cleaned", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Persisting", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.speculation.task.duration.threshold": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no 
partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is 
sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + 
"org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not 
equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect 
lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called 
before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined 
ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite 
@ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be 
logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ 
Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the 
running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", 
+ "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + 
"org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display 
useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.excludeOnFailure.enabled": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + 
"org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + 
"org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output 
directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle 
read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ 
WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 
partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ 
countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + 
"org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block 
scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + 
"org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", 
+ "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid 
NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + 
"org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + 
"org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.files.ignoreMissingFiles": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + 
"org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with 
old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)" + ], + "spark.speculation.efficiency.processRateMultiplier": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite 
@ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ 
caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + 
"org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + 
"org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced 
data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage 
truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ 
failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + 
"org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map 
output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling total size of results larger than maxResultSize", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should 
not be counted into result size", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo 
with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite 
@ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + 
"org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page 
should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + 
"org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.broadcast.blockSize": [ + "org.apache.spark.api.python.PythonRDDSuite @ SparkContext's hadoop configuration should be respected in PythonRDD", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty pair RDD to python does not throw an exception (SPARK-5441)", + "org.apache.spark.api.python.SerDeUtilSuite @ Converting an empty python RDD to pair RDD does not throw an exception (SPARK-5441)", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.broadcast.BroadcastSuite @ Using TorrentBroadcast locally", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables from multiple threads", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in local mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Using broadcast after destroy prints callsite", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Cache broadcast to disk (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor", + "org.apache.spark.broadcast.BroadcastSuite @ One broadcast value instance per executor when memory is constrained", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local 
checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically clean up local checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ 
caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions", + "org.apache.spark.executor.ExecutorSuite @ Executor's worker threads should be UninterruptibleThread", + "org.apache.spark.executor.ExecutorSuite @ SPARK-19276: OOMs correctly handled with a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-23816: interrupts are not masked by a FetchFailure", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated 
executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.FailureSuite @ failure in a single-stage job", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure because task results are not serializable", + "org.apache.spark.FailureSuite @ managed memory leak error should not mask other failures (SPARK-9266", + "org.apache.spark.FailureSuite @ last failure cause is sent back to driver", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not serializable", + "org.apache.spark.FailureSuite @ failure cause stacktrace is sent back to driver if exception is not deserializable", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FailureSuite @ SPARK-16304: Link error should not crash executor", + "org.apache.spark.FileSuite @ text files", + "org.apache.spark.FileSuite @ text files (compressed)", + "org.apache.spark.FileSuite @ text files do not allow null rows", + "org.apache.spark.FileSuite @ SequenceFiles", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - default", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - bzip2", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - snappy", + "org.apache.spark.FileSuite @ SequenceFile (compressed) - lz4", + "org.apache.spark.FileSuite @ SequenceFile with writable key", + "org.apache.spark.FileSuite @ SequenceFile with writable value", + "org.apache.spark.FileSuite @ SequenceFile with writable key and value", + "org.apache.spark.FileSuite @ implicit conversions in reading SequenceFiles", + "org.apache.spark.FileSuite @ object files of ints", + "org.apache.spark.FileSuite @ object files of complex types", + "org.apache.spark.FileSuite @ object files of classes from a JAR", + "org.apache.spark.FileSuite @ write SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ read SequenceFile using new Hadoop API", + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.FileSuite @ fixed record length binary file as byte array", + "org.apache.spark.FileSuite @ negative binary record length should raise an exception", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (old Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (old Hadoop API)", + "org.apache.spark.FileSuite @ prevent user from overwriting the non-empty directory (new Hadoop API)", + "org.apache.spark.FileSuite @ allow user to disable the output directory existence checking (new Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through old Hadoop API", + "org.apache.spark.FileSuite @ save Hadoop Dataset through new Hadoop API", + "org.apache.spark.FileSuite @ Get 
input files via old Hadoop API", + "org.apache.spark.FileSuite @ Get input files via new Hadoop API", + "org.apache.spark.FileSuite @ spark.files.ignoreCorruptFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (old Hadoop API)", + "org.apache.spark.FileSuite @ spark.hadoopRDD.ignoreEmptySplits work correctly (new Hadoop API)", + "org.apache.spark.FileSuite @ spark.files.ignoreMissingFiles should work both HadoopRDD and NewHadoopRDD", + "org.apache.spark.FileSuite @ SPARK-25100: Support commit tasks when Kyro registration is required", + "org.apache.spark.FutureActionSuite @ simple async action", + "org.apache.spark.FutureActionSuite @ complex async action", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor tasks trigger plugin calls", + "org.apache.spark.internal.plugin.PluginContainerSuite @ SPARK-33088: executor failed tasks trigger plugin calls", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + 
"org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.PartitioningSuite @ RangePartitioner equality", + "org.apache.spark.PartitioningSuite @ RangePartitioner getPartition", + "org.apache.spark.PartitioningSuite @ RangePartitioner for keys that are not Comparable (but with Ordering)", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.PartitioningSuite @ RangePartitioner should return a single partition for empty RDDs", + "org.apache.spark.PartitioningSuite @ HashPartitioner not equal to RangePartitioner", + "org.apache.spark.PartitioningSuite @ zero-length partitions should be correctly handled", + "org.apache.spark.PartitioningSuite @ Number of elements in RDD is less than number of partitions", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ countAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ collectAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ foreachPartitionAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ takeAsync", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async success handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ async failure handling", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ SimpleFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.AsyncRDDActionsSuite @ ComplexFutureAction callback must not consume a thread while waiting", + "org.apache.spark.rdd.DoubleRDDSuite @ sum", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksOnEmpty", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucket", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithOneBucketExactMatch", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithTwoUnEvenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksInRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithTwoUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithFourUnevenBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaN", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksMixedRangeWithUnevenBucketsAndNaNAndNaNRangeAndInfinity", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithOutOfRangeWithInfiniteBuckets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasic", + 
"org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicSingleElement", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicNoRange", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsBasicTwo", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithDoubleValuesAtMinMax", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithMoreRequestedThanElements", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsForLargerDatasets", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithoutBucketsWithNonIntegralBucketEdges", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.DoubleRDDSuite @ ThrowsExceptionOnInvalidRDDs", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint blocks exist - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ zero-partition RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ keys and values", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveNewAPIHadoopFile should call setConf if format is configurable", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ The JobId on the driver and executors should be the same during the commit", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopFile should respect configured output committers", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ failure callbacks should be called before calling writer.close() in saveAsHadoopFile", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsNewAPIHadoopDataset should support invalid output paths when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ saveAsHadoopDataset should respect empty output directory when there are no files to be committed to an absolute output location", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionPruningRDDSuite @ Pruned Partitions can be unioned", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ concurrency", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with tokenization", + "org.apache.spark.rdd.PipedRDDSuite @ failure in iterating over pipe input", + "org.apache.spark.rdd.PipedRDDSuite @ stdin writer thread should be exited when task is finished", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with empty partition", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with env variable", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which cannot be launched due to bad command", + "org.apache.spark.rdd.PipedRDDSuite @ pipe with process which is launched but fails with non-zero exit status", + "org.apache.spark.rdd.PipedRDDSuite @ basic pipe with separate working directory", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports map_input_file", + "org.apache.spark.rdd.PipedRDDSuite @ test pipe exports mapreduce_map_input_file", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ 
countApproxDistinct", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union", + "org.apache.spark.rdd.RDDSuite @ SparkContext.union parallel partition listing", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ fold", + "org.apache.spark.rdd.RDDSuite @ fold with op modifying first arg", + "org.apache.spark.rdd.RDDSuite @ aggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ caching with failures", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with locality", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs with partial locality", + "org.apache.spark.rdd.RDDSuite @ zipped RDDs", + "org.apache.spark.rdd.RDDSuite @ partition pruning", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ take", + "org.apache.spark.rdd.RDDSuite @ top with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ top with custom ordering", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with predefined ordering", + "org.apache.spark.rdd.RDDSuite @ SPARK-40276: takeOrdered with empty RDDs", + "org.apache.spark.rdd.RDDSuite @ takeOrdered with custom ordering", + "org.apache.spark.rdd.RDDSuite @ isEmpty", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.rdd.RDDSuite @ takeSample from an empty rdd", + "org.apache.spark.rdd.RDDSuite @ randomSplit", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ cartesian on non-empty RDDs", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex with a single partition", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ retag with implicit ClassTag", + "org.apache.spark.rdd.RDDSuite @ task serialization exception should not hang scheduler", + "org.apache.spark.rdd.RDDSuite @ nested RDDs are not supported (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ actions cannot be performed inside of transformations (SPARK-5063)", + "org.apache.spark.rdd.RDDSuite @ custom RDD coalescer", + "org.apache.spark.rdd.RDDSuite @ SPARK-18406: race between end-of-task and completion iterator read lock release", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + 
"org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in an array not partitioned by a range partitioner", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.rdd.ZippedPartitionsSuite @ print sizes", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle 
with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure 
whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorIntegrationSuite @ SPARK-39195: exception thrown in OutputCommitter.commitTask()", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Only one of two duplicate commit tasks should commit", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ Job should not complete if all commits are denied", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskContextSuite @ provide metrics sources", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskCompletionListener after failure", + "org.apache.spark.scheduler.TaskContextSuite @ calls TaskFailureListeners after failure", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskContextSuite @ accumulators are updated on exception failures", + "org.apache.spark.scheduler.TaskContextSuite @ localProperties are propagated to executors correctly", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results smaller than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task deserialized with the correct classloader (SPARK-11195)", + "org.apache.spark.scheduler.TaskResultGetterSuite @ failed task is handled when error occurs deserializing the reason", + "org.apache.spark.scheduler.TaskResultGetterSuite @ SPARK-40261: task result metadata should not be counted into result size", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and 
executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Record serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Array serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.EnumSymbol serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.GenericAvroSerializerSuite @ SPARK-34477: GenericData.Fixed serialization and deserialization through KryoSerializer", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo without resizable output buffer should fail on large array", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.KryoSerializerSuite @ kryo with fold", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with parallelize for primitive arrays", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with collect for specialized tuples", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with SerializableHyperLogLog", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with reduce", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ kryo with fold", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + 
"org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract 
mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextInfoSuite @ getRDDStorageInfo only reports on RDDs that actually persist data", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)", + "org.apache.spark.SparkContextSuite @ Default path for file based RDDs is properly set (SPARK-12517)", + "org.apache.spark.SparkContextSuite @ calling multiple sc.stop() must not throw any exception", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup() with takeAsync() across multiple partitions", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.storage.FlatmapIteratorSuite @ Serializer Reset", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form a different thread", + "org.apache.spark.ThreadingSuite @ accessing SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ accessing multi-threaded SparkContext form multiple threads", + "org.apache.spark.ThreadingSuite @ parallel job execution", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ effects of unpersist() / persist() should be reflected", + "org.apache.spark.ui.UISeleniumSuite @ failed stages should not appear to be active", + "org.apache.spark.ui.UISeleniumSuite @ spark.ui.killEnabled should properly control kill button display", + "org.apache.spark.ui.UISeleniumSuite @ jobs page should not display job group name unless some job was submitted in a job group", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ kill stage POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ kill job POST/GET response is correct", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ 
job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.ui.UISeleniumSuite @ Support disable event timeline", + "org.apache.spark.UnpersistSuite @ unpersist RDD", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class", + "org.apache.spark.util.ClosureCleanerSuite @ closures inside a class with no default constructor", + "org.apache.spark.util.ClosureCleanerSuite @ closures that don't use fields of the outer class", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside an object", + "org.apache.spark.util.ClosureCleanerSuite @ nested closures inside a class", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs", + "org.apache.spark.util.MutableURLClassLoaderSuite @ driver sets context class loader in local mode", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.sort.bypassMergeThreshold": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that contains PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage with dynamic resource allocation enabled", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ 
checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with an order of magnitude difference in number of partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD with number of partitions similar in order of magnitude", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set without proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set with proper partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup between multiple RDD when defaultParallelism is set; with huge number of partitions in upstream RDDs", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ default partitioner uses largest partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ getNarrowAncestors with multiple parents", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + 
"org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch 
failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle 
without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.barrier.sync.timeout": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage with zip()", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.rdd.RDDBarrierSuite @ RDDBarrier mapPartitionsWithIndex", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ 
SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local mode", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode" + ], + "spark.shuffle.checksum.algorithm": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before 
checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + 
"org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + 
"org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.IndexShuffleBlockResolverSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though 
we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting 
contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.reducer.maxBlocksInFlightPerAddress": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + 
"org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + 
"org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + 
"org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error logging", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block push fails with too late exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage 
cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.shuffle.checksum.enabled": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + 
broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + 
"org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be 
removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.shuffle.detectCorrupt": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ 
partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can 
be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.shuffle.unsafe.file.output.buffer": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + 
"org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.io.LocalDiskShuffleMapOutputWriterSuite @ writing to an outputstream", + "org.apache.spark.shuffle.sort.io.LocalDiskShuffleMapOutputWriterSuite @ writing to a channel", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ 
SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory" + ], + "spark.reducer.maxReqsInFlight": 
[ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash 
codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs 
(SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ 
Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error logging", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block push fails with too late exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + 
"org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.shuffle.detectCorrupt.useExtraMemory": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ 
SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: 
isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to 
fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.locality.wait.node": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers 
ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ 
input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ basic 
caching", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite 
@ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable 
pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + 
"org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.shuffle.file.buffer": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + 
"org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.FailureSuite @ failure in a map stage", + "org.apache.spark.FailureSuite @ failure in tasks in a submitMapStage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + "org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + 
"org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.stageAttemptNumber getter", + "org.apache.spark.scheduler.TaskContextSuite @ TaskContext.get.numPartitions getter", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo true", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write with some empty partitions - transferTo false", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ only generate temp shuffle file for non-empty partition", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ cleanup of intermediate files after errors", + "org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriterSuite @ write checksum file", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write empty iterator", + "org.apache.spark.shuffle.sort.SortShuffleWriterSuite @ write with some records", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle 
with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job details page should display useful information for stages that haven't started", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ single insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ multiple insert", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ insert with collision", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ordering", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external 
aggregation", + "org.apache.spark.util.collection.ExternalSorterSpillSuite @ SPARK-36242 Spill File should not exists if writer close fails", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty data stream with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ few elements per partition with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ empty partitions with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in sorter with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ no sorting or partial aggregation with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ partial aggregation and sorting with spilling with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.reducer.maxSizeInFlight": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier stage that doesn't contain PartitionPruningRDD", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD 
[local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup shuffle", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.FailureSuite @ failure in a two-stage job", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators are registered for cleanups", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ aggregateByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with duplicates", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with negative key hash codes", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with collectAsMap", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ reduceByKey with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ countApproxDistinctByKey", + 
"org.apache.spark.rdd.PairRDDFunctionsSuite @ join", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join all-to-all", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ leftOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with empty RDD", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ cogroup with groupByed RDD having 0 partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ rightOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ fullOuterJoin", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with no matches", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ join with many output partitions", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith3", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ groupWith4", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtract with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ subtractByKey with narrow dependency", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with partitioner", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ lookup with bad partitioner", + "org.apache.spark.rdd.PartitionwiseSampledRDDSuite @ seed distribution", + "org.apache.spark.rdd.PipedRDDSuite @ advanced pipe", + "org.apache.spark.rdd.RDDCleanerSuite @ RDD shuffle cleanup standalone", + "org.apache.spark.rdd.RDDSuite @ basic operations", + "org.apache.spark.rdd.RDDSuite @ distinct with known partitioner preserves partitioning", + "org.apache.spark.rdd.RDDSuite @ partitioner aware union", + "org.apache.spark.rdd.RDDSuite @ treeAggregate", + "org.apache.spark.rdd.RDDSuite @ treeAggregate with ops modifying first args", + "org.apache.spark.rdd.RDDSuite @ SPARK-36419: treeAggregate with finalAggregateOnExecutor set to true", + "org.apache.spark.rdd.RDDSuite @ treeReduce", + "org.apache.spark.rdd.RDDSuite @ empty RDD", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ coalesced RDDs", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ sort an empty RDD", + "org.apache.spark.rdd.RDDSuite @ sortByKey", + "org.apache.spark.rdd.RDDSuite @ sortByKey ascending parameter", + "org.apache.spark.rdd.RDDSuite @ sortByKey with explicit ordering", + "org.apache.spark.rdd.RDDSuite @ repartitionAndSortWithinPartitions", + "org.apache.spark.rdd.RDDSuite @ intersection", + "org.apache.spark.rdd.RDDSuite @ intersection strips duplicates in an input", + "org.apache.spark.rdd.RDDSuite @ zipWithIndex chained with other RDDs (SPARK-4433)", + "org.apache.spark.rdd.RDDSuite @ zipWithUniqueId", + "org.apache.spark.rdd.SortingSuite @ sortByKey", + "org.apache.spark.rdd.SortingSuite @ large array", + "org.apache.spark.rdd.SortingSuite @ large array with one split", + "org.apache.spark.rdd.SortingSuite @ large array with many partitions", + "org.apache.spark.rdd.SortingSuite @ sort descending", + "org.apache.spark.rdd.SortingSuite @ sort descending with one split", + "org.apache.spark.rdd.SortingSuite @ sort descending with many partitions", + "org.apache.spark.rdd.SortingSuite @ more partitions than elements", + 
"org.apache.spark.rdd.SortingSuite @ empty RDD", + "org.apache.spark.rdd.SortingSuite @ partition balancing", + "org.apache.spark.rdd.SortingSuite @ partition balancing for descending sort", + "org.apache.spark.rdd.SortingSuite @ get a range of elements in a sorted RDD that is on one partition", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions in a descendingly sorted RDD", + "org.apache.spark.rdd.SortingSuite @ get a range of elements over multiple partitions but not taking up full partitions", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ simple use of submitMapStage", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching multiple map output partitions per reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ fetching all map output partitions in one reduce", + "org.apache.spark.scheduler.AdaptiveSchedulingSuite @ more reduce tasks than map output partitions", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.OutputCommitCoordinatorSuite @ SPARK-24589: Make sure stage state is cleaned up", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.serializer.KryoSerializerAutoResetDisabledSuite @ sort-shuffle with bypassMergeSort (SPARK-7873)", + "org.apache.spark.shuffle.BlockStoreShuffleReaderSuite @ read() releases resources on completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error logging", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block 
push fails with too late exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push", + "org.apache.spark.ShuffleNettySuite @ groupByKey without compression", + "org.apache.spark.ShuffleNettySuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleNettySuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleNettySuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleNettySuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleNettySuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ groupByKey without compression", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ groupByKey without compression", + "org.apache.spark.SortShuffleSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.SortShuffleSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.SortShuffleSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.SortShuffleSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the serialized path", + "org.apache.spark.SortShuffleSuite @ SortShuffleManager properly cleans up files for shuffles that use the deserialized path", + "org.apache.spark.StatusTrackerSuite @ basic status API usage", + "org.apache.spark.ui.UISeleniumSuite @ all jobs page should be rendered even though we configure the scheduling mode to fair", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars / cells reflect skipped stages / tasks", + "org.apache.spark.ui.UISeleniumSuite @ stages that aren't run appear as 'skipped stages' after a job finishes", + "org.apache.spark.ui.UISeleniumSuite @ jobs with stages that are skipped should show correct link descriptions on all jobs page", + "org.apache.spark.ui.UISeleniumSuite @ stage & 
job retention", + "org.apache.spark.ui.UISeleniumSuite @ job stages should have expected dotfile under DAG visualization", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple aggregator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ simple cogroup", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.worker.ui.retainedExecutors": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end 
reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in 
memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable 
pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- 
transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.worker.ui.retainedDrivers": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=true", + "org.apache.spark.deploy.worker.WorkerSuite @ don't cleanup non-shuffle files after executor exits when config spark.storage.cleanupFilesAfterExecutorExit=false", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + 
"org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + 
"org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite 
@ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - 
Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.scheduler.maxRegisteredResourcesWaitingTime": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + 
"org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node 
failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not 
the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ 
SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.shuffle.service.fetch.rdd.enabled": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that 
fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + 
"org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle 
service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with 
registerMergeResults is false", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ basic executor timeout", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track tasks running on executor", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ use appropriate time out depending on whether blocks are stored", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ keeps track of stored blocks for each rdd and split", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ handle timeouts correctly with multiple executors", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-38019: timedOutExecutors should be deterministic", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-27677: don't track blocks stored on disk when using shuffle service", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track executors pending for removal", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28839: Avoids NPE in context cleaner when shuffle service is on", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout calculation", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + 
"org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + 
"org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.shuffle.service.db.enabled": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with 
TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor 
limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ WorkDirCleanup cleans only app dirs whenspark.shuffle.service.db.enabled=false", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ 
End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + 
"org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: 
BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash 
collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.scheduler.revive.interval": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor 
registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + 
"org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the 
beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", 
+ "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.executor.extraLibraryPath": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey 
where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + 
"org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + 
"org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added 
executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.executor.logs.rolling.strategy": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting 
TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable 
class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using 
mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a 
decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file 
appender - time-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ file appender selection", + "org.apache.spark.util.FileAppenderSuite @ file appender async close stream abruptly", + "org.apache.spark.util.FileAppenderSuite @ file appender async close stream gracefully" + ], + "spark.shuffle.maxChunksBeingTransferred": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + 
"org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv 
when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC 
message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + 
"org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block 
replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + 
"org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.shuffle.service.port": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + 
"org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + 
"org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event 
logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + 
"org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in 
local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.submit.deployMode": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with 
cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ 
SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + 
"org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ client mode with a k8s master url", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ Avoid setting spark.task.cpus unreasonably (SPARK-27192)", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + 
"org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + 
"org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs" + ], + "spark.scheduler.minRegisteredResourcesRatio": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable 
exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + 
"org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ 
encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + 
"org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not 
deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.rpc.io.connectionTimeout": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + 
"org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed 
maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message timeout", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask a message abort", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: remotely error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in sever RpcEnv when another RpcEnv is in client mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ network events in client RpcEnv when another RpcEnv is in server mode", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ sendWithReply: unserializable error", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ send with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with authentication", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with SASL encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ ask with AES encryption", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-14699: RpcEnv.shutdown should not fire onDisconnected events", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ StackOverflowError should be sent back and Dispatcher should survive", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ SPARK-31233: ask rpcEndpointRef in client mode timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract 
mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy 
URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its 
iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.task.maxFailures": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure 
that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ super simple job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ multi-stage job", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job with fetch failure", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ job failure after 4 attempts", + "org.apache.spark.scheduler.BasicSchedulerIntegrationSuite @ SPARK-23626: RDD with expensive getPartitions() doesn't block scheduler loop", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + 
"org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.PoolSuite @ FIFO Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Fair Scheduler Test", + "org.apache.spark.scheduler.PoolSuite @ Nested Pool Test", + "org.apache.spark.scheduler.PoolSuite @ FIFO scheduler uses root pool and not spark.scheduler.pool property", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler uses default pool when spark.scheduler.pool property is not set", + "org.apache.spark.scheduler.PoolSuite @ FAIR Scheduler creates a new pool when spark.scheduler.pool property points to a non-existent pool", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-32653: Decommissioned host/executor should be considered as inactive", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not always schedule tasks on the same workers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for multiple CPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - 
delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler does not crash when tasks are not serializable", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ concurrent attempts for the same stage only have one active taskset", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule more tasks after a taskset is zombie", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ tasks are not re-scheduled while executor loss reason is pending", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: 
iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ if a task finishes with TaskState.LOST its executor is marked as dead", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ don't schedule for a barrier taskSet if available slots are less than pending tasks gpus limiting", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together diff ResourceProfile", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ schedule tasks for a barrier taskSet if all tasks can be launched together", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ cancelTasks shall kill all the running tasks and fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ killAllTaskAttempts shall kill all the running tasks and not fail the stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ mark taskset for a barrier stage as zombie in case a task fails", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler 
correctly accounts for GPUs per task", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler correctly accounts for GPUs per task with fractional amount", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Scheduler works with multiple ResourceProfiles and gpus", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduler should keep the decommission state where host was decommissioned", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ test full decommissioning flow", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost could fail task set if task is running", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-39955: executor lost should not fail task set if task is launching", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + 
"org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts 
succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ 
cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ 
verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and 
values" + ], + "spark.cores.max": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + 
"org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", 
+ "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + 
"org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + 
"org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from 
remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.redaction.regex": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in 
distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ multiple resource profiles", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ app environment", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process succeeds instantly", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process failing several times and then succeeding", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process doesn't restart if not supervised", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Process doesn't restart if killed", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Reset of backoff counter", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Kill process finalized with state KILLED", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Finalized with state FINISHED", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Finalized with state FAILED", + "org.apache.spark.deploy.worker.DriverRunnerTest @ Handle exception starting process", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk 
(encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + 
"org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Event logging with password redaction", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + 
"org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkConfSuite @ SPARK-27244 toDebugString redacts sensitive information", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default 
false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite 
@ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.UtilsSuite @ redact sensitive information", + "org.apache.spark.util.UtilsSuite @ redact sensitive information in command line args" + ], + "spark.executor.logs.rolling.maxSize": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ 
SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as 
expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + 
"org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations 
can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - time-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ file appender selection", + "org.apache.spark.util.FileAppenderSuite @ file appender async close stream abruptly", + "org.apache.spark.util.FileAppenderSuite @ file appender async close stream gracefully" + ], + "spark.deploy.recoveryMode": [ + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ can use a custom recovery mode factory", + "org.apache.spark.deploy.master.MasterSuite @ master correctly recover the application", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - spread out", + "org.apache.spark.deploy.master.MasterSuite @ basic scheduling with more memory - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor - no spread out", + 
"org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling with executor limit AND cores per executor AND max cores - no spread out", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles", + "org.apache.spark.deploy.master.MasterSuite @ scheduling for app with multiple resource profiles with max cores", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-13604: Master should ask Worker kill unknown executors and drivers", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-20529: Master should reply the address received from worker", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-27510: Master should avoid dead loop while launching executor failed in Worker", + "org.apache.spark.deploy.master.MasterSuite @ All workers on a host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ No workers should be decommissioned with invalid host", + "org.apache.spark.deploy.master.MasterSuite @ Only worker on host should be decommissioned", + "org.apache.spark.deploy.master.MasterSuite @ SPARK-19900: there should be a corresponding driver for the app after relaunching driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from driver", + "org.apache.spark.deploy.master.MasterSuite @ assign/recycle resources to/from executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must 
fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM 
if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different 
processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode 
under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ 
Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.executor.logs.rolling.time.interval": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy 
Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + 
"org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite 
@ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ 
zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager 
decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with 
hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - time-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ file appender selection", + "org.apache.spark.util.FileAppenderSuite @ file appender async close stream abruptly", + "org.apache.spark.util.FileAppenderSuite @ file appender async close stream gracefully" + ], + "spark.shuffle.service.db.backend": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + 
"org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ 
compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin 
initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host 
local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + 
"org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added 
executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.shuffle.service.index.cache.size": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting 
TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.client.AppClientSuite @ interface methods of AppClient using local Master", + "org.apache.spark.deploy.client.AppClientSuite @ request executors with multi resource profiles", + "org.apache.spark.deploy.client.AppClientSuite @ request from AppClient before initialized with master", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceLevelDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceMetricsSuite @ SPARK-31646: metrics should be registered", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Recover shuffle data with spark.shuffle.service.db.enabled=true after shuffle service restart", + "org.apache.spark.deploy.ExternalShuffleServiceRocksDBSuite @ Can't recover shuffle data with spark.shuffle.service.db.enabled=false after shuffle service restart", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a 
excluded host must fail", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (small number of executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedExecutors (more executors)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (small number of drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ test clearing of finishedDrivers (more drivers)", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could be launched without any resources", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + 
"org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when 
spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + 
"org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test 
transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ 
spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.ui.custom.executor.log.url": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ResultStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ submit a barrier ShuffleMapStage that requires more slots than current total under local-cluster mode", + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ SPARK-39983 - Broadcasted value not cached on driver", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation default behavior", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores <= cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with max cores > cores per worker", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ dynamic allocation with cores per executor AND max cores", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill the same executor twice (SPARK-9795)", + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ the pending replacement executors should not be lost (SPARK-10515)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ disable force kill for busy executors (SPARK-9552)", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ kill all executors on localhost", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", 
+ "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.HeartbeatReceiverSuite @ expire dead hosts should kill executors with replacement (SPARK-8119)", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when spark.task.cpus > 1", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with 
NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + 
"org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSchedulerCreationSuite @ local-cluster", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addJar can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test gpu driver resource files and discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- default transitive = true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- transitive=false will not download dependency jars", + "org.apache.spark.SparkContextSuite @ SPARK-34506: Add jar support Ivy URI -- test exclude param when transitive unspecified", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test different version", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test invalid param", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test multiple transitive params", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test param key case sensitive", + "org.apache.spark.SparkContextSuite @ SPARK-33084: Add jar support Ivy URI -- test transitive value case insensitive", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned 
executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ ExternalAppendOnlyMap shouldn't fail when forced to spill before calling its iterator", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with null keys and values", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 spill during iteration leaks internal map", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ drop all references to the underlying map once the iterator is exhausted", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ sort without breaking sorting contracts with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with many hash collisions", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with hash collisions using the Int.MaxValue key", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling with null keys and values" + ], + "spark.eventLog.gcMetrics.oldGenerationGarbageCollectors": [ + 
"org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load 
balancing", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ 
SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser" + ], + "spark.resources.discoveryPlugin": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from discovery script while launching", + "org.apache.spark.deploy.worker.WorkerSuite @ worker could load resources from resources file and discovery script while launching", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ error checking parsing resources and executor and task configs", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option with resource profile", + "org.apache.spark.executor.CoarseGrainedExecutorBackendSuite @ use resource discovery and allocated file option", + "org.apache.spark.internal.plugin.PluginContainerSuite @ plugin initialization in non-local mode with resources", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ plugin initialization in non-local mode fpga and gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ single plugin gpu", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ multiple plugins with one empty", + "org.apache.spark.resource.ResourceDiscoveryPluginSuite @ empty plugin fallback to discovery script", + "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer no addresses errors", + "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer multiple resource types", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover the remaining", + "org.apache.spark.resource.ResourceUtilsSuite @ get from resources file and discover resource profile remaining", + 
"org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer multiple gpus on driver", + "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer script returns mismatched name", + "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer with invalid class", + "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer script returns invalid format", + "org.apache.spark.resource.ResourceUtilsSuite @ Resource discoverer script doesn't exist", + "org.apache.spark.resource.ResourceUtilsSuite @ gpu's specified but not a discovery script", + "org.apache.spark.SparkContextSuite @ test driver discovery under local-cluster mode", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode" + ], + "spark.eventLog.gcMetrics.youngGenerationGarbageCollectors": [ + "org.apache.spark.BarrierStageOnSubmittedSuite @ SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should consider all kinds of resources for the barrier stage", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should drop zero accumulator updates", + "org.apache.spark.executor.ExecutorSuite @ Heartbeat should not drop zero accumulator updates when the conf is disabled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in DirectTaskResult", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in TaskKilled", + "org.apache.spark.executor.ExecutorSuite @ Send task executor metrics in ExceptionFailure", + "org.apache.spark.executor.ExecutorSuite @ SPARK-34949: do not re-register BlockManager when executor is shutting down", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.RDDSuite @ repartitioned RDDs perform load balancing", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster 
mode", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.ui.UISeleniumSuite @ job progress bars should handle stage / task failures", + "org.apache.spark.ui.UISeleniumSuite @ stage & job retention", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.ui.UISeleniumSuite @ Staleness of Spark UI should not last minutes or hours", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser" + ], + "spark.shuffle.io.connectionTimeout": [ + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = off)", + "org.apache.spark.broadcast.BroadcastSuite @ Accessing TorrentBroadcast variables in a local cluster (encryption = on)", + "org.apache.spark.broadcast.BroadcastSuite @ Test Lazy Broadcast variables with TorrentBroadcast", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors only in distributed mode", + "org.apache.spark.broadcast.BroadcastSuite @ Unpersisting TorrentBroadcast on executors and driver in distributed mode", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + 
"org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.DistributedSuite @ task throws not serializable exception", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ accumulators", + "org.apache.spark.DistributedSuite @ broadcast variables", + "org.apache.spark.DistributedSuite @ repeatedly failing task", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM", + "org.apache.spark.DistributedSuite @ repeatedly failing task that crashes JVM with a zero exit code (SPARK-16925)", + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.DistributedSuite @ compute without caching when no partitions fit in memory", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ passing environment variables to cluster", + "org.apache.spark.DistributedSuite @ recover from node failures", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.DistributedSuite @ unpersist RDDs", + "org.apache.spark.DistributedSuite @ reference partitions inside a task", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + 
"org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ global sync by barrier() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ share messages with allGather() call", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if we attempt to synchronize with different blocking calls", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ successively sync with allGather and barrier", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ support multiple barrier() call within a single task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception on barrier() call timeout", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if barrier() call doesn't happen on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ throw exception if the number of barrier() calls are not the same on every task", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ serialized task larger than max RPC message size", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify task with no decommissioning works as expected", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + 
"org.apache.spark.security.CryptoStreamUtilsSuite @ encryption key propagation to executors", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Java", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ support barrier execution mode under local-cluster mode", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a 
result stage when the job finishes", + "org.apache.spark.SparkContextSuite @ test resource scheduling under local-cluster mode", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Disallow to create SparkContext in executors", + "org.apache.spark.SparkContextSuite @ SPARK-32160: Allow to create SparkContext in executors if the config is set", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE should be counted as network failure", + "org.apache.spark.SparkContextSuite @ SPARK-39957: ExitCode HEARTBEAT_FAILURE will be counted as task failure whenEXECUTOR_REMOVE_DELAY is disabled", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerBasicStrategyReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = 
false)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 2 replicas - 1 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 3 replicas - 2 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 4 replicas - 3 block manager deletions", + "org.apache.spark.storage.BlockManagerProactiveReplicationSuite @ proactive block replication - 5 replicas - 4 block manager deletions", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 2x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - 3x replication", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - mixed between 1x to 5x", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - off-heap", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - replication failures", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = false)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ test block replication failures when block is received by remote block manager but putBlock fails (stream = true)", + "org.apache.spark.storage.BlockManagerReplicationSuite @ block replication - addition and deletion of block managers", + "org.apache.spark.storage.BlockManagerSuite @ master + 2 managers interaction", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ test decommission block manager should not be part of peers", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from 
remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser" + ], + "spark.io.compression.snappy.blockSize": [ + "org.apache.spark.broadcast.BroadcastSuite @ TorrentBroadcast's blockifyObject and unblockifyObject are inverses", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.io.CompressionCodecSuite @ snappy compression codec", + "org.apache.spark.io.CompressionCodecSuite @ snappy compression codec short form", + "org.apache.spark.io.CompressionCodecSuite @ snappy supports concatenation of serialized streams", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption" + ], + "spark.checkpoint.compress": [ + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + 
"org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.cleaner.referenceTracking.cleanCheckpoints": [ + "org.apache.spark.CheckpointStorageSuite @ checkpoint compression", + "org.apache.spark.CheckpointStorageSuite @ cache checkpoint preferred location", + "org.apache.spark.CheckpointStorageSuite @ SPARK-31484: checkpoint should not fail in retry", + "org.apache.spark.CheckpointSuite @ basic checkpointing [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointing partitioners [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ CheckpointRDD with zero partitions [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ checkpointAllMarkedAncestors [reliable checkpoint]", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.locality.wait.process": [ + "org.apache.spark.CheckpointSuite @ basic checkpointing [local checkpoint]", + "org.apache.spark.CheckpointSuite @ RDDs with one-to-one dependencies [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ParallelCollectionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [reliable checkpoint]", + "org.apache.spark.CheckpointSuite @ BlockRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ShuffleRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ UnionRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CartesianRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoalescedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ CoGroupedRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ ZippedPartitionsRDD [local checkpoint]", + "org.apache.spark.CheckpointSuite @ PartitionerAwareUnionRDD [local checkpoint]", + "org.apache.spark.ContextCleanerSuite @ automatically cleanup normal checkpoint", + "org.apache.spark.DistributedSuite @ compute when only some partitions fit in memory", + "org.apache.spark.DistributedSuite @ recover from node failures with replication", + "org.apache.spark.FailureSuite @ failure because cached RDD partitions are missing from DiskStore (SPARK-15736)", + "org.apache.spark.FileSuite @ 
portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ file caching", + "org.apache.spark.JobCancellationSuite @ do not put partially executed partitions into cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.PartitioningSuite @ RangPartitioner.sketch", + "org.apache.spark.PartitioningSuite @ RangePartitioner should run only one job if data is roughly balanced", + "org.apache.spark.PartitioningSuite @ RangePartitioner should work well on unbalanced data", + "org.apache.spark.rdd.JdbcRDDSuite @ basic functionality", + "org.apache.spark.rdd.JdbcRDDSuite @ large id overflow", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ basic lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ indirect lineage truncation - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ missing checkpoint block fails with informative message", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ foldByKey with mutable result type", + "org.apache.spark.rdd.RDDSuite @ basic caching", + "org.apache.spark.rdd.RDDSuite @ SPARK-27666: Do not release lock while TaskContext already completed", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy delay scheduling for barrier stage", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before any resources have been rejected", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when isAllFreeResources = true", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - task set with no locality requirements should not starve one with them", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) reset time if last full resource offer (isAllResources = true) was accepted as well as any following partial resource offers", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset time if any offer was rejected since last full offer was fully accepted", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-16106 locality levels updated if executor added to existing host", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Locality should be used for bulk offers even with delay scheduling off", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test delay scheduling for barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-24818: test resource revert of barrier TaskSetManager", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-37300: TaskSchedulerImpl should ignore task finished event if its task was finished state", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Disk", + "org.apache.spark.storage.FlatmapIteratorSuite @ Flatmap Iterator to Memory", + "org.apache.spark.ui.UISeleniumSuite @ stages page should show skipped stages", + "org.apache.spark.util.PeriodicRDDCheckpointerSuite @ Checkpointing" + ], + "spark.io.compression.zstd.level": [ + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch 
failures lead to rerun", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.io.CompressionCodecSuite @ zstd compression codec", + "org.apache.spark.io.CompressionCodecSuite @ zstd compression codec short form", + "org.apache.spark.io.CompressionCodecSuite @ zstd supports concatenation of serialized zstd", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ 
End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not 
deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser" + ], + "spark.io.compression.zstd.bufferSize": [ + "org.apache.spark.ContextCleanerSuite @ automatically cleanup RDD + shuffle + broadcast in distributed mode", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.DistributedSuite @ simple groupByKey", + "org.apache.spark.DistributedSuite @ groupByKey where map output sizes exceed maxMbInFlight", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-map", + "org.apache.spark.DistributedSuite @ recover from repeated node failures during shuffle-reduce", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + 
"org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.io.CompressionCodecSuite @ zstd compression codec", + "org.apache.spark.io.CompressionCodecSuite @ zstd compression codec short form", + "org.apache.spark.io.CompressionCodecSuite @ zstd supports concatenation of serialized zstd", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch below max RPC message size", + "org.apache.spark.MapOutputTrackerSuite @ remote fetch using broadcast", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map sizes with merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses from merged shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: get map statuses for merged shuffle block chunks", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34939: remote fetch using broadcast if broadcasted value is destroyed", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be enabled in some scenarios with push based shuffle", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-36892: Batch fetch should be disabled in some scenarios with push based shuffle", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.serializer.KryoSerializerDistributedSuite @ kryo objects are serialised consistently in different processes", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service disabled (SPARK-32077)", + "org.apache.spark.shuffle.ShuffleDriverComponentsSuite @ test serialization of shuffle initialization conf to executors", + "org.apache.spark.ShuffleNettySuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleNettySuite @ shuffle serializer", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks", + "org.apache.spark.ShuffleNettySuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleNettySuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleNettySuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleNettySuite @ subtract mutable pairs", + "org.apache.spark.ShuffleNettySuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleNettySuite @ SPARK-36206: shuffle checksum detect 
disk corruption", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle non-zero block size", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle serializer", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ zero sized blocks without kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ shuffle on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sorting on mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ cogroup using mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ subtract mutable pairs", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SortShuffleSuite @ shuffle non-zero block size", + "org.apache.spark.SortShuffleSuite @ shuffle serializer", + "org.apache.spark.SortShuffleSuite @ zero sized blocks", + "org.apache.spark.SortShuffleSuite @ zero sized blocks without kryo", + "org.apache.spark.SortShuffleSuite @ shuffle on mutable pairs", + "org.apache.spark.SortShuffleSuite @ sorting on mutable pairs", + "org.apache.spark.SortShuffleSuite @ cogroup using mutable pairs", + "org.apache.spark.SortShuffleSuite @ subtract mutable pairs", + "org.apache.spark.SortShuffleSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.SortShuffleSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression and encryption", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser" + ], + "spark.scheduler.listenerbus.eventqueue.shared.capacity": [ + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers should 
not result in job failure", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission stalled workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission eager workers ensure that fetch failures lead to rerun", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that correct log urls get propagated from workers", + "org.apache.spark.deploy.LogUrlsStandaloneSuite @ verify that log urls reflect SPARK_PUBLIC_DNS (SPARK-6175)", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in a stage", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in multiple stages", + "org.apache.spark.InternalAccumulatorSuite @ internal accumulators in resubmitted stages", + "org.apache.spark.JobCancellationSuite @ job group", + "org.apache.spark.JobCancellationSuite @ inherited job group (SPARK-6629)", + "org.apache.spark.JobCancellationSuite @ job group with interruption", + "org.apache.spark.JobCancellationSuite @ task reaper kills JVM if killed tasks keep running for too long", + "org.apache.spark.JobCancellationSuite @ task reaper will not kill JVM if spark.task.killTimeout == -1", + "org.apache.spark.JobCancellationSuite @ two jobs sharing the same stage", + "org.apache.spark.JobCancellationSuite @ interruptible iterator of shuffle reader", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for old hadoop with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with cache and coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics for new Hadoop API with coalesce", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics when reading text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - simple", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read - more stages", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records - New Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics on records read with cache", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input read/write and shuffle read/write metrics all line up", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with interleaved reads", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics on records written - new Hadoop API", + "org.apache.spark.metrics.InputOutputMetricsSuite @ output metrics when writing text file", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new CombineFileInputFormat", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with old Hadoop API in different thread", + "org.apache.spark.metrics.InputOutputMetricsSuite @ input metrics with new Hadoop API in different thread", + "org.apache.spark.rdd.RDDSuite @ SPARK-40211: customize initialNumPartitions for take", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-24818: disable legacy 
delay scheduling for barrier stage", + "org.apache.spark.scheduler.BarrierTaskContextSuite @ SPARK-34069: Kill barrier tasks should respect SPARK_JOB_INTERRUPT_ON_CANCEL", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ compute max number of concurrent tasks can be launched when some executors are busy", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ custom log url for Spark UI is applied", + "org.apache.spark.scheduler.CoarseGrainedSchedulerBackendSuite @ extra resources from executor", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-3353] parent stage should have lower stage id", + "org.apache.spark.scheduler.DAGSchedulerSuite @ [SPARK-13902] Ensure no duplicate stages are created", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ zero split job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial job w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cache location preferences w/ dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ regression test for getCacheLocs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getMissingParentStages should consider all ancestor RDDs' cache statuses", + "org.apache.spark.scheduler.DAGSchedulerSuite @ avoid exponential blowup when getting preferred locs list", + "org.apache.spark.scheduler.DAGSchedulerSuite @ unserializable task", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial job cancellation", + "org.apache.spark.scheduler.DAGSchedulerSuite @ job cancellation no-kill backend", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when executor failure without shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-28967 properties must be cloned before posting to listener bus for 0 partition", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Failures in different stages should not trigger an overall abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Non-consecutive stage failures don't trigger abort", + "org.apache.spark.scheduler.DAGSchedulerSuite @ trivial shuffle with multiple fetch failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Fail the job if a barrier ResultTask failed", + 
"org.apache.spark.scheduler.DAGSchedulerSuite @ late fetch failures don't cause multiple concurrent attempts for the same map stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ extremely late fetch failures don't cause multiple concurrent attempts for the same stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task events always posted in speculation / when stage is killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ ignore late map task completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run shuffle with map stage failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle fetch failure in a reused shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ don't submit stage until its dependencies map outputs are registered (SPARK-5259)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ register map outputs correctly after ExecutorLost and task Resubmitted", + "org.apache.spark.scheduler.DAGSchedulerSuite @ failure of stage used by two jobs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ run trivial shuffle with out-of-band executor failure and retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ recursive shuffle failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ cached post-shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved accumulator should not impact other accumulators", + "org.apache.spark.scheduler.DAGSchedulerSuite @ misbehaved resultHandler should not crash DAGScheduler and SparkContext", + "org.apache.spark.scheduler.DAGSchedulerSuite @ invalid spark.job.interruptOnCancel should not crash DAGScheduler", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulator not calculated for resubmitted task in result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ accumulators are updated on exception failures and task killed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce tasks should be placed locally with map output", + "org.apache.spark.scheduler.DAGSchedulerSuite @ reduce task locality preferences should only include machines with largest map outputs", + "org.apache.spark.scheduler.DAGSchedulerSuite @ stages with both narrow and shuffle dependencies use narrow ones for locality", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Spark exceptions should include call site in stack trace", + "org.apache.spark.scheduler.DAGSchedulerSuite @ catch errors in event loop", + "org.apache.spark.scheduler.DAGSchedulerSuite @ simple map stage submission", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with reduce stage also depending on the data", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with multiple shared stages and failures", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Trigger mapstage's job listener in submitMissingTasks", + "org.apache.spark.scheduler.DAGSchedulerSuite @ map stage submission with executor failure late map task 
completions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles correctly returns only direct shuffle parents", + "org.apache.spark.scheduler.DAGSchedulerSuite @ task end event should have updated accumulators (SPARK-20342)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from the same stage attempt don't trigger multiple stage retries", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Barrier task failures from a previous stage attempt don't trigger stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: abort stage while using old fetch protocol", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: retry all the succeeding stages when the map stage is indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-25341: continuous indeterminate stage roll back", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-29042: Sampled RDD with unordered input should be indeterminate", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: cannot rollback a result stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: local checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ Completions in zombie tasksets update status of non-zombie taskset", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test default resource profile", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profiles errors by default", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test 2 resource profile with merge conflict config true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ test multiple resource profiles created from merging use same rp", + "org.apache.spark.scheduler.DAGSchedulerSuite @ getShuffleDependenciesAndResourceProfiles returns deps and profiles correctly", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ don't call sc.stop in listener", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation and shutdown of LiveListenerBus", + "org.apache.spark.scheduler.SparkListenerSuite @ bus.stop() waits for the event queue to completely drain", + "org.apache.spark.scheduler.SparkListenerSuite @ metrics for dropped listener events", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo", + "org.apache.spark.scheduler.SparkListenerSuite @ basic creation of StageInfo with shuffle", + "org.apache.spark.scheduler.SparkListenerSuite @ StageInfo with fewer tasks than partitions", + "org.apache.spark.scheduler.SparkListenerSuite @ local metrics", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() not called when result sent directly", + 
"org.apache.spark.scheduler.SparkListenerSuite @ SparkListener moves on if a listener throws an exception", + "org.apache.spark.scheduler.SparkListenerSuite @ registering listeners via spark.extraListeners", + "org.apache.spark.scheduler.SparkListenerSuite @ add and remove listeners to/from LiveListenerBus queues", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ event queue size can be configured through spark conf", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-39973: Suppress error logs when the number of timers is set to 0", + "org.apache.spark.scheduler.SparkListenerWithClusterSuite @ SparkListener sends executor added message", + "org.apache.spark.scheduler.WorkerDecommissionSuite @ verify a running task with all workers decommissioned succeeds", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleNettySuite @ metrics for shuffle with aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ShuffleOldFetchProtocolSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle without aggregation", + "org.apache.spark.SortShuffleSuite @ metrics for shuffle with aggregation", + "org.apache.spark.SparkContextSuite @ register and deregister Spark listener from SparkContext", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise interrupted exception on cancel", + "org.apache.spark.SparkContextSuite @ Killing tasks that raise runtime exception on cancel", + "org.apache.spark.SparkContextSuite @ cancel zombie tasks in a result stage when the job finishes", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=false)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-32850: BlockManager decommission should respect the configuration (enabled=true)", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after task start", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that an already running task which is going to cache data succeeds on a decommissioned executor after one task ends but before job ends", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that shuffle blocks are migrated", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ verify that both migrations can work at the same time", + "org.apache.spark.storage.BlockManagerDecommissionIntegrationSuite @ SPARK-36782 not deadlock if MapOutput uses broadcast", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ spilling with compression 
and encryption", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ SPARK-22713 external aggregation updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with kryo ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ spilling in local cluster with many reduce tasks with java ser", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle", + "org.apache.spark.util.collection.ExternalSorterSuite @ cleanup of intermediate files in shuffle with failures", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory" + ], + "spark.shuffle.service.removeShuffle": [ + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + 
"org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage 
retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM" + ], + "spark.shuffle.registration.timeout": [ + "org.apache.spark.deploy.DecommissionWorkerSuite @ decommission workers ensure that shuffle output is regenerated even with shuffle service", + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExternalShuffleServiceSuite @ groupByKey without compression", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle non-zero block size", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle serializer", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ zero sized blocks without kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sorting on mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ cogroup using mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ subtract mutable pairs", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Kryo", + "org.apache.spark.ExternalShuffleServiceSuite @ sort with Java non serializable class - Java", + "org.apache.spark.ExternalShuffleServiceSuite @ shuffle with different compression settings (SPARK-3426)", + "org.apache.spark.ExternalShuffleServiceSuite @ [SPARK-4085] rerun map stage if reduce stage cannot find its local shuffle file", + "org.apache.spark.ExternalShuffleServiceSuite @ cannot find its local shuffle file if no execution of the stage and rerun shuffle", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle without aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ metrics for shuffle with aggregation", + "org.apache.spark.ExternalShuffleServiceSuite @ multiple simultaneous attempts for one task (SPARK-8029)", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-34541: shuffle can be removed", + "org.apache.spark.ExternalShuffleServiceSuite @ 
SPARK-36206: shuffle checksum detect disk corruption", + "org.apache.spark.ExternalShuffleServiceSuite @ using external shuffle service", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-25888: using external shuffle service fetching disk persisted blocks", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-37618: external shuffle service removes shuffle blocks from deallocated executors", + "org.apache.spark.ExternalShuffleServiceSuite @ SPARK-38640: memory only blocks can unpersist using shuffle service cache fetching", + "org.apache.spark.MapOutputTrackerSuite @ SPARK-34826: Adaptive shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ All shuffle files on the storage endpoint should be cleaned up when it is lost", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor process lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files lost when worker lost with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ shuffle files not lost when executor failure with shuffle service", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during 
spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ host local shuffle reading with external shuffle service enabled (SPARK-27651)", + "org.apache.spark.shuffle.HostLocalShuffleReadingSuite @ Enable host local shuffle reading when push based shuffle is enabled", + "org.apache.spark.SparkContextSuite @ SPARK-36772: Store application attemptId in BlockStoreClient for push based shuffle", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM" + ], + "spark.eventLog.rolling.maxFileSize": [ + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ events for finished job are dropped in new compact file", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ Don't compact file if score is lower than threshold", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", + 
"org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Log overwriting", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling" + ], + "spark.eventLog.overwrite": [ + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ events for finished job are dropped in new compact file", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ Don't compact file if score is lower than threshold", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ rewrite files with test filters", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lzf)", + 
"org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Log overwriting", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Log overwriting", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression" + ], + "spark.eventLog.buffer.kb": [ + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ events for finished job are dropped in new compact file", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ Don't compact file if score is lower than threshold", + "org.apache.spark.deploy.history.EventLogFileCompactorSuite @ rewrite files with test filters", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ compact event log files", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ compact event log files", + 
"org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-33146: don't let one bad rolling log folder prevent loading other applications", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-39225: Support spark.history.fs.update.batchSize", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ SPARK-36354: EventLogFileReader should skip rolling event log directories with no logs", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesReaderSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Log overwriting", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec None", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lz4)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(lzf)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(snappy)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - codec Some(zstd)", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ rolling event log files - the max size of event log file size less than lower limit", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Log overwriting", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression" + ], + "spark.driver.log.dfsDir": [ + "org.apache.spark.deploy.history.LevelDBBackendFsHistoryProviderSuite @ driver log cleaner", + "org.apache.spark.deploy.history.RocksDBBackendFsHistoryProviderSuite @ driver log cleaner", + "org.apache.spark.util.logging.DriverLoggerSuite @ 
driver logs are persisted locally and synced to dfs" + ], + "spark.eventLog.dir": [ + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log" + ], + "spark.scheduler.listenerbus.eventqueue.eventLog.capacity": [ + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ interrupt within listener is handled correctly: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: throw interrupt", + "org.apache.spark.scheduler.SparkListenerSuite @ SPARK-30285: Fix deadlock in AsyncEventQueue.removeListenerOnError: set Thread interrupted", + "org.apache.spark.scheduler.SparkListenerSuite @ event queue size can be configured through spark conf" + ], + "spark.eventLog.logBlockUpdates.enabled": [ + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log" + ], + "spark.eventLog.rolling.enabled": [ + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ create EventLogFileWriter with enable/disable rolling", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Basic event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ End-to-end event logging with compression", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Spark-33504 sensitive attributes redaction in properties", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ Executor metrics update", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log", + 
"org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay", + "org.apache.spark.scheduler.ReplayListenerSuite @ End-to-end replay with compression" + ], + "spark.eventLog.logStageExecutorMetrics": [ + "org.apache.spark.deploy.history.LevelDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.deploy.history.RocksDBBackendHistoryServerSuite @ incomplete apps get refreshed", + "org.apache.spark.scheduler.EventLoggingListenerSuite @ SPARK-31764: isBarrier should be logged in event log" + ], + "spark.eventLog.compression.codec": [ + "org.apache.spark.deploy.history.RollingEventLogFilesWriterSuite @ Use the defalut value of spark.eventLog.compression.codec", + "org.apache.spark.deploy.history.SingleEventLogFileWriterSuite @ Use the defalut value of spark.eventLog.compression.codec" + ], + "spark.ui.reverseProxyUrl": [ + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available with reverseProxy", + "org.apache.spark.deploy.master.MasterSuite @ master/worker web ui available behind front-end reverseProxy", + "org.apache.spark.SparkContextSuite @ SPARK-34659: check invalid UI_REVERSE_PROXY_URL" + ], + "spark.deploy.zookeeper.dir": [ + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine" + ], + "spark.deploy.zookeeper.url": [ + "org.apache.spark.deploy.master.PersistenceEngineSuite @ ZooKeeperPersistenceEngine" + ], + "spark.submit.pyFiles": [ + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create submission", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ create then kill then request status", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ good request paths", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ server returns unknown fields", + "org.apache.spark.deploy.rest.StandaloneRestSubmitSuite @ client handles faulty server", + "org.apache.spark.deploy.SparkSubmitSuite @ specify deploy mode through configuration", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles legacy standalone cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles standalone client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles mesos client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ handles confs with flag equivalents", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ resolves config paths correctly", + "org.apache.spark.deploy.SparkSubmitSuite @ support glob path", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-27575: yarn confs should merge new 
value with existing value", + "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties", + "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", + "org.apache.spark.deploy.SparkSubmitSuite @ handles natural line delimiters in --properties-file and --conf uniformly" + ], + "spark.driver.memory": [ + "org.apache.spark.deploy.SparkSubmitSuite @ handles YARN cluster mode", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-33530: handles standalone mode with archives", + "org.apache.spark.deploy.SparkSubmitSuite @ handles k8s cluster mode" + ], + "spark.files.overwrite": [ + "org.apache.spark.deploy.SparkSubmitSuite @ automatically sets mainClass if primary resource is S3 JAR in client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ error informatively when mainClass isn't set and S3 JAR doesn't exist", + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile - invalid url", + "org.apache.spark.deploy.SparkSubmitSuite @ downloadFile - file doesn't exist", + "org.apache.spark.deploy.SparkSubmitSuite @ download one file to local", + "org.apache.spark.deploy.SparkSubmitSuite @ download list of files to local", + "org.apache.spark.deploy.SparkSubmitSuite @ Avoid re-upload remote resources in yarn client mode", + "org.apache.spark.deploy.SparkSubmitSuite @ download remote resource if it is not supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ avoid downloading remote resource if it is supported by yarn service", + "org.apache.spark.deploy.SparkSubmitSuite @ force download from forced schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ force download for all the schemes", + "org.apache.spark.deploy.SparkSubmitSuite @ support --py-files/spark.submit.pyFiles in non pyspark application", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put 
CanonicalFile" + ], + "spark.files.useFetchCache": [ + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.SparkContextSuite @ basic case for addFile and listFiles", + "org.apache.spark.SparkContextSuite @ SPARK-33530: basic case for addArchive and listArchives", + "org.apache.spark.SparkContextSuite @ addFile recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces with recursive works", + "org.apache.spark.SparkContextSuite @ SPARK-30126: addFile when file path contains spaces without recursive works", + "org.apache.spark.SparkContextSuite @ cannot call addFile with different paths that have the same filename", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ addFile can be called twice with same file in non-local-mode (SPARK-16787)", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed", + "org.apache.spark.SparkContextSuite @ SPARK-35691: addFile/addJar/addDirectory should put CanonicalFile" + ], + "spark.files.io.connectionTimeout": [ + "org.apache.spark.deploy.SparkSubmitSuite @ SPARK-21568 ConsoleProgressBar should be enabled only in shells", + "org.apache.spark.rpc.netty.NettyRpcEnvSuite @ file server", + "org.apache.spark.SparkContextSuite @ SPARK-34225: addFile/addJar shouldn't further encode URI if a URI form string is passed" + ], + "spark.pyspark.driver.python": [ + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly" + ], + "spark.pyspark.python": [ + "org.apache.spark.deploy.SparkSubmitSuite @ resolves command line argument paths correctly" + ], + "spark.driver.userClassPathFirst": [ + "org.apache.spark.deploy.SparkSubmitSuite @ start SparkApplication without modifying system properties" + ], + "spark.dynamicAllocation.initialExecutors": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.UtilsSuite @ getDynamicAllocationInitialExecutors" + ], + "spark.dynamicAllocation.cachedExecutorIdleTimeout": [ + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite 
@ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ basic executor timeout", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track tasks running on executor", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ use appropriate time out depending on whether blocks are stored", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ keeps track of stored blocks for each rdd and split", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ handle timeouts correctly with multiple executors", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-38019: timedOutExecutors should be deterministic", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-27677: don't track blocks stored on disk when using shuffle service", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track executors pending for removal", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28839: Avoids NPE in context cleaner when shuffle service is on", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout calculation", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.dynamicAllocation.maxExecutors": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + 
"org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running 
executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.dynamicAllocation.shuffleTracking.timeout": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ basic executor timeout", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track tasks running on executor", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ use appropriate time out depending on whether blocks are stored", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ keeps track of stored blocks for each rdd and split", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ handle timeouts correctly with multiple executors", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-38019: timedOutExecutors should be deterministic", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-27677: don't track blocks stored on disk when using shuffle service", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track executors pending for removal", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28839: Avoids NPE in context cleaner when shuffle service is on", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout 
calculation", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.dynamicAllocation.minExecutors": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage", + "org.apache.spark.util.UtilsSuite @ getDynamicAllocationInitialExecutors" + ], + "spark.dynamicAllocation.executorIdleTimeout": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped 
by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ basic executor timeout", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track tasks running on executor", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ use appropriate time out depending on whether blocks are stored", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ keeps track of stored blocks for each rdd and split", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ handle timeouts correctly with multiple executors", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-38019: timedOutExecutors should be deterministic", + 
"org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-27677: don't track blocks stored on disk when using shuffle service", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ track executors pending for removal", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28839: Avoids NPE in context cleaner when shuffle service is on", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout calculation", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.dynamicAllocation.schedulerBacklogTimeout": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + "org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time out", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.dynamicAllocation.executorAllocationRatio": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ initial executor limit", + "org.apache.spark.ExecutorAllocationManagerSuite @ initialize dynamic allocation in SparkContext", + 
"org.apache.spark.ExecutorAllocationManagerSuite @ verify min/max executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting state", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors default profile", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors multiple profiles initial num same as needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors multiple profiles", + "org.apache.spark.ExecutorAllocationManagerSuite @ executionAllocationRatio is correctly handled", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors capped by num pending tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ add executors when speculative tasks added", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: one stage being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: multiple stages being unschedulable", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-31418: remove executors after unschedulable tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-30511 remove executors when speculative tasks end", + "org.apache.spark.ExecutorAllocationManagerSuite @ properly handle task end events from completed stages", + "org.apache.spark.ExecutorAllocationManagerSuite @ cancel pending executors when no longer needed", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled=false)", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-33763: metrics to track dynamic allocation (decommissionEnabled = true)", + "org.apache.spark.ExecutorAllocationManagerSuite @ remove multiple executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ Removing with various numExecutorsTargetForDefaultProfileId condition", + "org.apache.spark.ExecutorAllocationManagerSuite @ interleaving add and remove", + "org.apache.spark.ExecutorAllocationManagerSuite @ starting/canceling add timer", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop with no events", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop add behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove behavior", + "org.apache.spark.ExecutorAllocationManagerSuite @ mock polling loop remove with decommissioning", + "org.apache.spark.ExecutorAllocationManagerSuite @ listeners trigger add executors correctly", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp up when target < running executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until first job is submitted", + "org.apache.spark.ExecutorAllocationManagerSuite @ avoid ramp down initial executors until idle executor is timeout", + "org.apache.spark.ExecutorAllocationManagerSuite @ get pending task number and related locality preference", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-8366: maxNumExecutorsNeededPerResourceProfile should properly handle failed tasks", + "org.apache.spark.ExecutorAllocationManagerSuite @ reset the state of allocation manager", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-23365 Don't update target num executors when killing idle executors", + "org.apache.spark.ExecutorAllocationManagerSuite @ SPARK-26758 check executor target number after idle time 
out", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.excludeOnFailure.timeout": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.stage.maxFailedExecutorsPerNode": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: 
iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ multiple attempts for the same task count once", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ only exclude nodes for the task set when all the excluded executors are all on same host", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.application.maxFailedTasksPerExecutor": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + 
"org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.application.fetchFailure.enabled": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for 
long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: 
iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ multiple attempts for the same task count once", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ only exclude nodes for the task set when all the excluded executors are all on same host", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.application.maxFailedExecutorsPerNode": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.task.maxTaskAttemptsPerNode": [ + 
"org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded 
node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ multiple attempts for the same task count once", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ only exclude nodes for the task set when all the excluded executors are all on same host", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.excludeOnFailure.stage.maxFailedTasksPerExecutor": [ + "org.apache.spark.deploy.StandaloneDynamicAllocationSuite @ executor registration on a excluded host must fail", + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ executors aren't excluded as a result of tasks in failed task sets", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage failure", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures 
expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failure timeout works as expected for long-running tasksets", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude still respects legacy configs", + "org.apache.spark.scheduler.HealthTrackerSuite @ check exclude configuration invariants", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey task and stage excludelist", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ scheduled tasks obey node and executor excludelists", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage when all executors are excluded and we cannot acquire new executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-31418 abort timer should kick in when task is completely excluded &allocation manager could not acquire a new executor before the timeout", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 2", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded node for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 0", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 1", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 2", + 
"org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 3", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 4", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 5", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 6", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 7", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 8", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ Excluded executor for entire task set prevents per-task exclusion checks: iteration 9", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ abort stage if executor loss results in unschedulability from previously failed tasks", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ multiple attempts for the same task count once", + "org.apache.spark.scheduler.TaskSetExcludelistSuite @ only exclude nodes for the task set when all the excluded executors are all on same host", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition" + ], + "spark.shuffle.io.maxRetries": [ + "org.apache.spark.DistributedSuite @ caching (encryption = off)", + "org.apache.spark.DistributedSuite @ caching (encryption = on)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = off)", + "org.apache.spark.DistributedSuite @ caching on disk (encryption = on)", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security default off", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on same password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security on mismatch password", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on server", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security mismatch auth off on client", + "org.apache.spark.network.netty.NettyBlockTransferSecuritySuite @ security with aes encryption", + "org.apache.spark.network.netty.NettyBlockTransferServiceSuite @ SPARK-27637: test fetch block with executor dead", + "org.apache.spark.rdd.DoubleRDDSuite @ WorksWithHugeRange", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKey", + "org.apache.spark.rdd.PairRDDFunctionsSuite @ sampleByKeyExact", + "org.apache.spark.rdd.RDDSuite @ takeSample", + "org.apache.spark.scheduler.SparkListenerSuite @ onTaskGettingResult() called when result fetched remotely", + "org.apache.spark.scheduler.TaskResultGetterSuite @ handling results larger than max RPC message size", + "org.apache.spark.scheduler.TaskResultGetterSuite @ task retried if result missing from block manager", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.serializer.KryoSerializerResizableOutputSuite @ kryo with resizable output buffer should succeed on large array", + "org.apache.spark.StatusTrackerSuite @ getJobIdsForGroup()", + 
"org.apache.spark.storage.BlockManagerSuite @ SPARK-9591: getRemoteBytes from another location when Exception throw", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-14252: getOrElseUpdate should still read from remote storage", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-17484: master block locations are updated following an invalid remote block fetch" + ], + "spark.files.openCostInBytes": [ + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)" + ], + "spark.files.maxPartitionBytes": [ + "org.apache.spark.FileSuite @ binary file input as byte array", + "org.apache.spark.FileSuite @ portabledatastream caching tests", + "org.apache.spark.FileSuite @ portabledatastream persist disk storage", + "org.apache.spark.FileSuite @ portabledatastream flatmap tests", + "org.apache.spark.FileSuite @ SPARK-22357 test binaryFiles minPartitions", + "org.apache.spark.FileSuite @ minimum split size per node and per rack should be less than or equal to maxSplitSize", + "org.apache.spark.SparkContextSuite @ Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)" + ], + "spark.shuffle.push.numPushThreads": [ + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32921: test new protocol changes fetching both Map and Merge status in single RPC", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations not empty", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: merger locations reuse from shuffle dependency", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Ensure child stage should not start before all the parent stages are completed with shuffle merge finalized for all the parent stages", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Empty RDD should not be computed", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Cancelled stage should be marked finalized after the shuffle merge is finalized", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: SPARK-35549: Merge results should not get registered after shuffle merge finalization", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable push based shuffle in the case of a barrier stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ 
SPARK-32923: handle stage failure for indeterminate map stage with push-based shuffle", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after stage completion", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization triggered after minimum threshold push complete", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization behavior with stage cancellation for determinate and indeterminate stages during spark.shuffle.push.finalize.timeout wait", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-33701: check adaptive shuffle merge finalization with minimum pushes complete after the stage completion replacing the finalize task with delay = 0", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: corrupted merged shuffle block FetchFailure should unregister merge results", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is true", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-40096: Send finalize events even if shuffle merger blocks indefinitely with registerMergeResults is false" + ], + "spark.shuffle.accurateBlockThreshold": [ + "org.apache.spark.MapOutputTrackerSuite @ SPARK-32210: serialize mapStatuses to a nested Array and deserialize them", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-22540: ensure HighlyCompressedMapStatus calculates correct avgSize", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-36967: HighlyCompressedMapStatus should record accurately the size of skewed shuffle blocks", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-36967: Limit accurate skewed block number if too many blocks are skewed", + "org.apache.spark.serializer.KryoSerializerSuite @ registration of HighlyCompressedMapStatus", + "org.apache.spark.serializer.UnsafeKryoSerializerSuite @ registration of HighlyCompressedMapStatus" + ], + "spark.shuffle.spill.diskWriteBufferSize": [ + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching before checkpointing", + "org.apache.spark.rdd.LocalCheckpointSuite @ checkpoint without draining iterator - caching after checkpointing", + "org.apache.spark.rdd.RDDSuite @ collect large number of empty partitions", + "org.apache.spark.scheduler.MapStatusSuite @ SPARK-21133 HighlyCompressedMapStatus#writeExternal throws NPE", + "org.apache.spark.shuffle.sort.ShuffleExternalSorterSuite @ nested spill should be no-op", + "org.apache.spark.util.collection.ExternalAppendOnlyMapSuite @ force to spill for 
external aggregation", + "org.apache.spark.util.collection.ExternalSorterSuite @ sorting updates peak execution memory", + "org.apache.spark.util.collection.ExternalSorterSuite @ force to spill for external sorter" + ], + "spark.shuffle.push.mergersMinStaticThreshold": [ + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Disable shuffle merge due to not enough mergers available", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency should not cause DAGScheduler to hang", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: Reused ShuffleDependency with Shuffle Merge disabled for the corresponding ShuffleDependency with shuffle data loss should recompute missing partitions", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-32920: metadata fetch failure should not unregister map status", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-34826: Adaptively fetch shuffle mergers with stage retry for indeterminate stage", + "org.apache.spark.scheduler.DAGSchedulerSuite @ SPARK-38987: All shuffle outputs for a shuffle push merger executor should be cleaned up on a fetch failure whenspark.files.fetchFailure.unRegisterOutputOnHost is true" + ], + "spark.dynamicAllocation.shuffleTracking.enabled": [ + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle block tracking", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ shuffle tracking with multiple executors and concurrent jobs", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-28455: avoid overflow in timeout calculation", + "org.apache.spark.scheduler.dynalloc.ExecutorMonitorSuite @ SPARK-37688: ignore SparkListenerBlockUpdated event if executor was not active", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Worker decommission and executor idle timeout", + "org.apache.spark.scheduler.WorkerDecommissionExtendedSuite @ Decommission 2 executors from 3 executors in total", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout": [ + "org.apache.spark.scheduler.HealthTrackerIntegrationSuite @ SPARK-15865 Progress with fewer executors than maxTaskFailures", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should kick in when task is completely excluded & no new executor can be acquired", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 try to acquire a new executor when 
task is unschedulable with 1 executor", + "org.apache.spark.scheduler.TaskSchedulerImplSuite @ SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets" + ], + "spark.excludeOnFailure.killExcludedExecutors": [ + "org.apache.spark.scheduler.HealthTrackerSuite @ executors can be excluded with only a few failures per stage", + "org.apache.spark.scheduler.HealthTrackerSuite @ stage exclude updates correctly on stage success", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluded executors and nodes get recovered with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ exclude can handle lost executors", + "org.apache.spark.scheduler.HealthTrackerSuite @ task failures expire with time", + "org.apache.spark.scheduler.HealthTrackerSuite @ only exclude nodes for the application when enough executors have failed on that specific host", + "org.apache.spark.scheduler.HealthTrackerSuite @ excluding decommission and kills executors when enabled", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch" + ], + "spark.locality.wait": [ + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSet with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ multiple offers with no preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ skip unsatisfiable locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ basic delay scheduling", + "org.apache.spark.scheduler.TaskSetManagerSuite @ we do not need to delay scheduling when we only have noPref tasks in the queue", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with fallback", + "org.apache.spark.scheduler.TaskSetManagerSuite @ delay scheduling with failed hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ task result lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ repeated failures lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Executors exit for reason unrelated to currently running tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-31837: Shift to the new highest locality level if there is when recomputeLocality", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned host should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32653: Decommissioned executor should not be used to calculate locality levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ do not emit warning when serialized task is small", + "org.apache.spark.scheduler.TaskSetManagerSuite @ emit warning when serialized task is large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Not serializable exception thrown if the task cannot be serialized", + "org.apache.spark.scheduler.TaskSetManagerSuite @ abort the job if total size of results is too large", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32470: do not check total size of intermediate stages", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ speculative and noPref task should be scheduled after 
node-local", + "org.apache.spark.scheduler.TaskSetManagerSuite @ node-local tasks should be scheduled right away when there are only node-local and no-preference tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Ensure TaskSetManager is usable after addition of levels", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Kill other task attempts when one attempt belonging to the same task succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ Killing speculative tasks does not count towards aborting the taskset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-19868: DagScheduler only notified of taskEnd when state is ready", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update application healthTracker for shuffle-fetch", + "org.apache.spark.scheduler.TaskSetManagerSuite @ update healthTracker before adding pending task to avoid race condition", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21563 context's added jars shouldn't change mid-TaskSet", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskSetManager passes task resource along", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task is submitted only once for execution", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-21040: Check speculative tasks are launched when an executor is decommissioned and the tasks running on it cannot finish within EXECUTOR_DECOMMISSION_KILL_INTERVAL", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ TaskOutputFileAlreadyExistException lead to task set abortion", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-30359: don't clean executorsPendingToRemove at the beginning of CoarseGrainedSchedulerBackend.reset", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test 
SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-40094: Send TaskEnd if task failed with NotSerializableException or TaskOutputFileAlreadyExistException" + ], + "spark.locality.wait.rack": [ + "org.apache.spark.scheduler.TaskSetManagerSuite @ new executors get added and lost", + "org.apache.spark.scheduler.TaskSetManagerSuite @ test RACK_LOCAL tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-13704 Rack Resolution is done with a batch of de-duped hosts" + ], + "spark.speculation.efficiency.enabled": [ + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie", + "org.apache.spark.scheduler.TaskSetManagerSuite @ [SPARK-22074] Task killed by other attempt task should not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24677: Avoid NoSuchElementException from MedianHeap", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-24755 Executor loss can cause task to not be resubmitted", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-26755 Ensure that a speculative task obeys original locality preferences", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-29976 Regular speculation configs should still take effect even when a threshold is provided", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-33741 Test minimum amount of time a task runs before being considered for speculation", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test speculation for TaskSet with single task", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_MIN_THRESHOLD for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_PROCESS_RATE_MULTIPLIER for speculating inefficient tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-32170: test SPECULATION_EFFICIENCY_TASK_DURATION_FACTOR for speculating tasks", + "org.apache.spark.scheduler.TaskSetManagerSuite @ SPARK-37580: Reset numFailures when one of task attempts succeeds" + ], + "spark.shuffle.push.maxBlockBatchSize": [ + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error 
logging", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block push fails with too late exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push" + ], + "spark.shuffle.push.maxBlockSizeToPush": [ + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ A batch of blocks is limited by maxBlocksBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are excluded in the preparation", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in a push request are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-33701: Ensure all the blocks are pushed before notifying driver about push completion", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Basic block push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Large blocks are skipped for push", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Number of shuffle blocks grouped in a single push request is limited by maxBlockBatchSize", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error retries", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Error logging", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Blocks are continued to push even when a block push fails with collision exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ More blocks are not pushed when a block push fails with too late exception", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ Connect exceptions remove all the push requests for that host", + "org.apache.spark.shuffle.ShuffleBlockPusherSuite @ SPARK-36255: FileNotFoundException stops the push" + ], + "spark.driver.extraClassPath": [ + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (java)", + "org.apache.spark.SparkConfSuite @ SPARK-17240: SparkConf should be serializable (kryo)" + ], + "spark.broadcast.UDFCompressionThreshold": [ + "org.apache.spark.SparkConfSuite @ SPARK-28355: Use Spark conf for threshold at which UDFs are compressed by broadcast" + ], + "spark.storage.decommission.shuffleBlocks.enabled": [ + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no migrations configured", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ SPARK-40168: block decom manager handles shuffle file not found", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", + 
"org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage" + ], + "spark.storage.decommission.rddBlocks.enabled": [ + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no migrations configured", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ SPARK-40168: block decom manager handles shuffle file not found", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage" + ], + "spark.storage.decommission.shuffleBlocks.maxThreads": [ + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test that with no blocks we finish migration", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with no peers", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager with only shuffle files time moves forward", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager does not re-add removed shuffle files", + 
"org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ SPARK-40168: block decom manager handles shuffle file not found", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager handles IO failures", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ block decom manager short circuits removed blocks", + "org.apache.spark.storage.BlockManagerDecommissionUnitSuite @ test shuffle and cached rdd migration without any error", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should migrate all cached blocks", + "org.apache.spark.storage.BlockManagerSuite @ test decommissionRddCacheBlocks should keep the block if it is not able to migrate", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - no limit", + "org.apache.spark.storage.BlockManagerSuite @ test migration of shuffle blocks during decommissioning - larger limit", + "org.apache.spark.storage.BlockManagerSuite @ [SPARK-34363]test migration of shuffle blocks during decommissioning - small limit", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-33387 Support ordered shuffle block migration", + "org.apache.spark.storage.FallbackStorageSuite @ migrate shuffle data to fallback storage" + ], + "spark.shuffle.registration.maxAttempts": [ + "org.apache.spark.storage.BlockManagerSuite @ SPARK-20640: Shuffle registration timeout and maxAttempts conf are working", + "org.apache.spark.storage.BlockManagerSuite @ SPARK-39647: Failure to register with ESS should prevent registering the BM" + ], + "spark.storage.decommission.fallbackStorage.cleanUp": [ + "org.apache.spark.storage.FallbackStorageSuite @ SPARK-34142: fallback storage API - cleanUp", + "org.apache.spark.storage.FallbackStorageSuite @ Upload from all decommissioned executors", + "org.apache.spark.storage.FallbackStorageSuite @ Upload multi stages", + "org.apache.spark.storage.FallbackStorageSuite @ lz4 - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ lzf - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ snappy - Newly added executors should access old data from remote storage", + "org.apache.spark.storage.FallbackStorageSuite @ zstd - Newly added executors should access old data from remote storage" + ], + "spark.executor.logs.rolling.maxRetainedFiles": [ + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling (compressed)", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - time-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling (compressed)", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - cleaning", + "org.apache.spark.util.FileAppenderSuite @ file appender selection" + ], + "spark.executor.logs.rolling.enableCompression": [ + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - time-based rolling (compressed)", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling 
file appender - time-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ SPARK-35027: rolling file appender - size-based rolling close stream", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - size-based rolling (compressed)", + "org.apache.spark.util.FileAppenderSuite @ rolling file appender - cleaning", + "org.apache.spark.util.FileAppenderSuite @ file appender selection" + ], + "spark.driver.log.allowErasureCoding": [ + "org.apache.spark.util.logging.DriverLoggerSuite @ driver logs are persisted locally and synced to dfs" + ] +}
\ No newline at end of file